hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---|
add2d1ba2845d15ca3d6cdbcf7ac080bbfa2c027.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sort.h"
__global__
void gpu_build_pred(unsigned int* const d_out,
unsigned int* const d_in,
const size_t numElems,
unsigned int bit_mask,
unsigned int zero_or_one)
{
unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (glbl_t_idx >= numElems)
return;
unsigned int curr_elem = d_in[glbl_t_idx];
// predicate is true if result is 0
unsigned int pred = curr_elem & bit_mask;
unsigned int pred_result = zero_or_one ? 0 : 1;
if (pred == bit_mask)
{
pred_result = zero_or_one ? 1 : 0;
}
d_out[glbl_t_idx] = pred_result;
__syncthreads();
unsigned int dummy = d_out[glbl_t_idx];
}
__global__
void gpu_scatter_elems(unsigned int* const d_out,
unsigned int* const d_in,
unsigned int* const d_preds,
unsigned int* const d_scanned_preds,
unsigned int* const d_out_offset,
const size_t numElems,
unsigned int zero_or_one)
{
unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (glbl_t_idx >= numElems || d_preds[glbl_t_idx] == 0)
{
return;
}
unsigned int d_out_idx = d_scanned_preds[glbl_t_idx];
// offset the addresses with total sum of predicate
// array when working with 1 bits
if (zero_or_one == 1)
d_out_idx = d_out_idx + *d_out_offset;
unsigned int curr_val = d_in[glbl_t_idx];
d_out[d_out_idx] = curr_val;
}
void radix_sort(unsigned int* const d_out,
unsigned int* const d_in,
unsigned int* const d_preds,
unsigned int* const d_scanned_preds,
const size_t numElems)
{
unsigned int block_sz = 1024;
unsigned int grid_sz = (unsigned int)ceil(float(numElems) / float(block_sz));
unsigned int* d_scatter_offset;
checkCudaErrors(hipMalloc(&d_scatter_offset, sizeof(unsigned int)));
// Do this for every bit, from LSB to MSB
for (unsigned int sw = 0; sw < (sizeof(unsigned int) * 8); ++sw)
{
for (unsigned int bit = 0; bit <= 1; ++bit)
{
unsigned int bit_mask = 1 << sw;
// Build predicate array
hipLaunchKernelGGL(( gpu_build_pred), dim3(grid_sz), dim3(block_sz), 0, 0, d_preds, d_in, numElems, bit_mask, bit);
// Scan predicate array
// If working with 0's, make sure the total sum of the predicate
// array is recorded for determining the offset of the 1's
if (bit == 0)
sum_scan_blelloch(d_scanned_preds, d_scatter_offset, d_preds, numElems);
else
sum_scan_blelloch(d_scanned_preds, NULL, d_preds, numElems);
// Scatter d_in's elements to their new locations in d_out
// Use predicate array to figure out which threads will move
// Use scanned predicate array to figure out the locations
hipLaunchKernelGGL(( gpu_scatter_elems), dim3(grid_sz), dim3(block_sz), 0, 0, d_out, d_in, d_preds, d_scanned_preds, d_scatter_offset, numElems, bit);
}
// Copy d_out to d_in in preparation for next significant bit
checkCudaErrors(hipMemcpy(d_in, d_out, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice));
}
checkCudaErrors(hipFree(d_scatter_offset));
}
| add2d1ba2845d15ca3d6cdbcf7ac080bbfa2c027.cu | #include "sort.h"
__global__
void gpu_build_pred(unsigned int* const d_out,
unsigned int* const d_in,
const size_t numElems,
unsigned int bit_mask,
unsigned int zero_or_one)
{
unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (glbl_t_idx >= numElems)
return;
unsigned int curr_elem = d_in[glbl_t_idx];
// predicate is true if result is 0
unsigned int pred = curr_elem & bit_mask;
unsigned int pred_result = zero_or_one ? 0 : 1;
if (pred == bit_mask)
{
pred_result = zero_or_one ? 1 : 0;
}
d_out[glbl_t_idx] = pred_result;
__syncthreads();
unsigned int dummy = d_out[glbl_t_idx];
}
__global__
void gpu_scatter_elems(unsigned int* const d_out,
unsigned int* const d_in,
unsigned int* const d_preds,
unsigned int* const d_scanned_preds,
unsigned int* const d_out_offset,
const size_t numElems,
unsigned int zero_or_one)
{
unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (glbl_t_idx >= numElems || d_preds[glbl_t_idx] == 0)
{
return;
}
unsigned int d_out_idx = d_scanned_preds[glbl_t_idx];
// offset the addresses with total sum of predicate
// array when working with 1 bits
if (zero_or_one == 1)
d_out_idx = d_out_idx + *d_out_offset;
unsigned int curr_val = d_in[glbl_t_idx];
d_out[d_out_idx] = curr_val;
}
void radix_sort(unsigned int* const d_out,
unsigned int* const d_in,
unsigned int* const d_preds,
unsigned int* const d_scanned_preds,
const size_t numElems)
{
unsigned int block_sz = 1024;
unsigned int grid_sz = (unsigned int)ceil(float(numElems) / float(block_sz));
unsigned int* d_scatter_offset;
checkCudaErrors(cudaMalloc(&d_scatter_offset, sizeof(unsigned int)));
// Do this for every bit, from LSB to MSB
for (unsigned int sw = 0; sw < (sizeof(unsigned int) * 8); ++sw)
{
for (unsigned int bit = 0; bit <= 1; ++bit)
{
unsigned int bit_mask = 1 << sw;
// Build predicate array
gpu_build_pred<<<grid_sz, block_sz>>>(d_preds, d_in, numElems, bit_mask, bit);
// Scan predicate array
// If working with 0's, make sure the total sum of the predicate
// array is recorded for determining the offset of the 1's
if (bit == 0)
sum_scan_blelloch(d_scanned_preds, d_scatter_offset, d_preds, numElems);
else
sum_scan_blelloch(d_scanned_preds, NULL, d_preds, numElems);
// Scatter d_in's elements to their new locations in d_out
// Use predicate array to figure out which threads will move
// Use scanned predicate array to figure out the locations
gpu_scatter_elems<<<grid_sz, block_sz>>>(d_out, d_in, d_preds, d_scanned_preds, d_scatter_offset, numElems, bit);
}
// Copy d_out to d_in in preparation for next significant bit
checkCudaErrors(cudaMemcpy(d_in, d_out, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice));
}
checkCudaErrors(cudaFree(d_scatter_offset));
}
|
5a63ab4715618d6d0c4103ac79ed18d4a05ba0fb.hip | // !!! This is a file automatically generated by hipify!!!
/*
Francisco Rodriguez Jimenez
[email protected]
nvcc - The NVIDIA CUDA Compiler
cuobjdump - The NVIDIA CUDA Object Utility
nvdisasm - The NVIDIA CUDA disassembler
nvprune - The NVIDIA CUDA Prune Tool
nsight - NVIDIA NSight, Eclipse Edition
nvvp - The NVIDIA CUDA Visual Profiler
nvprof - The NVIDIA CUDA Command-Line Profiler
cuda-memcheck - The NVIDIA CUDA Check Tool
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
using namespace std;
__host__ void check_CUDA_Error(const char *mensaje){
hipError_t error;
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess){
printf("ERROR %d: %s (%s)\n", error, hipGetErrorString(error), mensaje);
exit(EXIT_FAILURE);
}
}
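// Block-wise sum reduction: each thread adds a pair of input elements while
// loading into shared memory, a tree reduction then halves the active stride
// each step, and thread 0 writes the block's partial sum to Out[blockIdx.x];
// the host adds up the per-block partial sums afterwards.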
__global__ void reduceSum(int *d_V, int *Out, int N, int smen){
extern __shared__ int sdata[];
int tid = threadIdx.x;
int i = blockIdx.x * (blockDim.x*2) + threadIdx.x;
// printf("\nTAM MEMORIA: %d | tid: %d -> id: %d",smen, tid, i);
sdata[tid] = ((i < N/2) ? d_V[i] + d_V[i+blockDim.x] : 0.0f);
__syncthreads();
for (int s = (blockDim.x/2); s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if(tid == 0){
Out[blockIdx.x] = sdata[0];
}
}
int main(int argc, char** argv){
if(argc != 2){
cout << "Error de sintaxis: ejer8 <TAM>" << endl;
return(EXIT_FAILURE);
}
const int TAM = atoi(argv[1]);
//Punteros memoria host
int *vector_entrada, *host_o;
//Punteros memoria device
int *device_i, *device_o;
//Reserva de memoria host
vector_entrada = new int[TAM];
//Reserva de memoria device
hipMalloc((void **) &device_i, TAM * sizeof(int));
check_CUDA_Error("Error en la reserva del device");
//Inicialización vector
for(int i = 0 ; i < TAM; ++i){
vector_entrada[i] = 1;
}
cout << "VECTOR ENTRADA: " << endl;
for(int i = 0 ; i < TAM; ++i){
cout << vector_entrada[i] << " ";
}
//Copia de host a device
hipMemcpy(device_i, vector_entrada, sizeof(int)*TAM, hipMemcpyHostToDevice);
check_CUDA_Error("Errir en la copia del host al device");
//Preparo y lanzo el kernel
dim3 threadsPerBlock(TAM);
dim3 numBlocks(ceil((float)TAM / threadsPerBlock.x));
int smemSize = threadsPerBlock.x * sizeof(int);
hipMalloc((void **) &device_o, numBlocks.x * sizeof(int));
host_o = new int[numBlocks.x];
hipLaunchKernelGGL(( reduceSum), dim3(numBlocks), dim3(threadsPerBlock), smemSize, 0, device_i, device_o, TAM, threadsPerBlock.x);
hipDeviceSynchronize();
//Copio el resultado de device a host
hipMemcpy(host_o, device_o, sizeof(int)*numBlocks.x, hipMemcpyDeviceToHost);
int suma = 0;
cout << "\nVECTOR RESULTADO: " << endl;
for(int i = 0 ; i < numBlocks.x; ++i){
cout << host_o[i] << " ";
suma += host_o[i];
}
cout << "\n.....................\nRESULTADO FINAL: " << suma << endl;
delete [] vector_entrada;
delete [] host_o;
hipFree(device_i);
hipFree(device_o);
return EXIT_SUCCESS;
}
| 5a63ab4715618d6d0c4103ac79ed18d4a05ba0fb.cu | /*
Francisco Rodriguez Jimenez
[email protected]
nvcc - The NVIDIA CUDA Compiler
cuobjdump - The NVIDIA CUDA Object Utility
nvdisasm - The NVIDIA CUDA disassembler
nvprune - The NVIDIA CUDA Prune Tool
nsight - NVIDIA NSight, Eclipse Edition
nvvp - The NVIDIA CUDA Visual Profiler
nvprof - The NVIDIA CUDA Command-Line Profiler
cuda-memcheck - The NVIDIA CUDA Check Tool
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "cuda_runtime.h"
using namespace std;
__host__ void check_CUDA_Error(const char *mensaje){
cudaError_t error;
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess){
printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
exit(EXIT_FAILURE);
}
}
__global__ void reduceSum(int *d_V, int *Out, int N, int smen){
extern __shared__ int sdata[];
int tid = threadIdx.x;
int i = blockIdx.x * (blockDim.x*2) + threadIdx.x;
// printf("\nTAM MEMORIA: %d | tid: %d -> id: %d",smen, tid, i);
sdata[tid] = ((i < N/2) ? d_V[i] + d_V[i+blockDim.x] : 0.0f);
__syncthreads();
for (int s = (blockDim.x/2); s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if(tid == 0){
Out[blockIdx.x] = sdata[0];
}
}
int main(int argc, char** argv){
if(argc != 2){
cout << "Error de sintaxis: ejer8 <TAM>" << endl;
return(EXIT_FAILURE);
}
const int TAM = atoi(argv[1]);
//Punteros memoria host
int *vector_entrada, *host_o;
//Punteros memoria device
int *device_i, *device_o;
//Reserva de memoria host
vector_entrada = new int[TAM];
//Reserva de memoria device
cudaMalloc((void **) &device_i, TAM * sizeof(int));
check_CUDA_Error("Error en la reserva del device");
//Inicialización vector
for(int i = 0 ; i < TAM; ++i){
vector_entrada[i] = 1;
}
cout << "VECTOR ENTRADA: " << endl;
for(int i = 0 ; i < TAM; ++i){
cout << vector_entrada[i] << " ";
}
//Copia de host a device
cudaMemcpy(device_i, vector_entrada, sizeof(int)*TAM, cudaMemcpyHostToDevice);
check_CUDA_Error("Errir en la copia del host al device");
//Preparo y lanzo el kernel
dim3 threadsPerBlock(TAM);
dim3 numBlocks(ceil((float)TAM / threadsPerBlock.x));
int smemSize = threadsPerBlock.x * sizeof(int);
cudaMalloc((void **) &device_o, numBlocks.x * sizeof(int));
host_o = new int[numBlocks.x];
reduceSum<<<numBlocks, threadsPerBlock, smemSize>>>(device_i, device_o, TAM, threadsPerBlock.x);
cudaDeviceSynchronize();
//Copio el resultado de device a host
cudaMemcpy(host_o, device_o, sizeof(int)*numBlocks.x, cudaMemcpyDeviceToHost);
int suma = 0;
cout << "\nVECTOR RESULTADO: " << endl;
for(int i = 0 ; i < numBlocks.x; ++i){
cout << host_o[i] << " ";
suma += host_o[i];
}
cout << "\n.....................\nRESULTADO FINAL: " << suma << endl;
delete [] vector_entrada;
delete [] host_o;
cudaFree(device_i);
cudaFree(device_o);
return EXIT_SUCCESS;
}
|
db7da2d0fb39f4c0871c1b96f621c138f2e2c46d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
#include "helper.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// uncomment to use the camera
// #define CAMERA
// clamp an index to the min and max values specified
int clamp(int idx, int min, int max);
float* gaussian_kernel(int kernel_size, float sigma) {
float *kernel = new float[kernel_size * kernel_size];
float mid = (float)kernel_size/2.f; // coordinate value of the center of the kernel
float dist_sq;
float norm_sum = 0; // normalization factor
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
dist_sq = powf((float)i + 0.5 - mid, 2) + powf((float)j + 0.5 - mid, 2);
kernel[i + kernel_size * j] = expf( - dist_sq / (2*powf(sigma, 2)) );
norm_sum += kernel[i + kernel_size * j];
}
}
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
kernel[i + kernel_size * j] /= norm_sum;
// cout << kernel[i + kernel_size *j] << endl;
}
}
return kernel;
}
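// CPU reference convolution: for every channel and pixel, accumulate the
// kernel-weighted neighborhood into imgOut; out-of-range indices are clamped
// to the image border (replicate boundary).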
void convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks) {
int img_x, img_y;
// for every channel
for (int c = 0; c < nc; c++) {
// for every pixel in the image
for (int i = 0; i < w; i++){
for (int j = 0; j < h; j++) {
// for every pixel in the kernel
for (int k = 0; k < ks; k++) {
for (int l = 0; l < ks; l++) {
img_x = clamp(i + k - (ks/2 + 1), 0, w-1);
img_y = clamp(j + l - (ks/2 + 1), 0, h-1);
imgOut[i + w*j + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
}
}
__global__ void gpu_convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks) {
// indexes for the kernel
int img_x, img_y;
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; ++c) {
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
img_x = min(w-1, max(0, x + k - (ks/2 + 1)));
img_y = min(h-1, max(0, y + l - (ks/2 + 1)));
if (x < w && y < h) imgOut[x + w*y + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
__device__ int d_clamp(int ind, int minval, int maxval) {
return min(max(minval, ind), maxval);
}
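// Forward-difference gradient: one thread per pixel computes d/dx and d/dy as
// p[x+1]-p[x] and p[y+1]-p[y], with the last column/row set to zero.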
__global__ void gradient(float *d_imgIn, float *d_imgGrad_x, float *d_imgGrad_y, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int ind = x + w * y;
if ( x < w && y < h ) {
for (int c = 0; c < nc; c++) {
if (x + 1 < w) d_imgGrad_x[ind + c*w*h] = d_imgIn[ind + c*w*h + 1] - d_imgIn[ind + c*w*h]; // derivative along x
else d_imgGrad_x[ind + c*w*h] = 0;
if (y + 1 < h) d_imgGrad_y[ind + c*w*h] = d_imgIn[ind + c*w*h + w] - d_imgIn[ind + c*w*h]; // derivative along y
else d_imgGrad_y[ind + c*w*h] = 0;
}
}
}
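// Rotationally invariant gradient: a 3/10/3 (Scharr-like) weighted stencil,
// normalized by 1/32, with indices clamped at the image border.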
__global__ void ri_gradient(float *d_imgIn, float *d_imgGrad_x, float *d_imgGrad_y, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
for (int c = 0; c < nc; c++) {
if (x < w && y < h) {
d_imgGrad_x[x + y*w + c*w*h] =
( 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
+ 10*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y,0,h-1) + c*w*h]
+ 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
- 10*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h] ) / 32.f; // derivative along x
d_imgGrad_y[x + y*w + c*w*h] =
( 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
+ 10*d_imgIn[d_clamp(x,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
+ 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
- 10*d_imgIn[d_clamp(x,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h] ) / 32.f; // derivative along x
// d_imgGrad[x + y*w + (size_t)c*w*h + w*h*nc] = d_imgIn[ind + (size_t)c*w*h + w] - d_imgIn[ind + (size_t)c*w*h]; // derivative along y
}
}
}
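// Divergence via backward differences: dx and dy are backward differences of
// the two vector components (the component value itself is used at the
// left/top border) and are summed per pixel and channel.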
__global__ void divergence_2d(float *d_imgV1, float *d_imgV2, float *d_div, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
float dx;
float dy;
size_t ind = x + w * y;
if ( x < w && y < h ) {
for (int c = 0; c < nc; c++) {
if (x > 0) dx = d_imgV1[ind + (size_t)c*w*h] - d_imgV1[ind + (size_t)c*w*h - 1]; // derivative along x
else dx = d_imgV1[ind + (size_t)c*w*h];
if (y > 0) dy = d_imgV2[ind + (size_t)c*w*h] - d_imgV2[ind + (size_t)c*w*h - w]; // derivative along y
else dy = d_imgV2[ind + (size_t)c*w*h];
d_div[ind + (size_t)c*w*h] = dx + dy;
}
}
}
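// Per-pixel outer product of the gradient, summed over channels:
// M11 += gx*gx, M12 += gx*gy, M22 += gy*gy, i.e. the entries of the
// structure tensor before smoothing.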
__global__ void gpu_m_product(float *imgV1, float *imgV2, float *imgM11, float *imgM12, float *imgM22, int w, int h, int nc) {
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; c++) {
if (x < w && y < h) {
imgM11[x + y*w] += imgV1[x + y*w + c*w*h]*imgV1[x + y*w + c*w*h];
imgM12[x + y*w] += imgV1[x + y*w + c*w*h]*imgV2[x + y*w + c*w*h];
imgM22[x + y*w] += imgV2[x + y*w + c*w*h]*imgV2[x + y*w + c*w*h];
}
}
}
__device__ void eigval(float *eigenval, float m11, float m12, float m21, float m22) {
// implementing solution for: lambda**2 + lambda*(-m22-m11) + (a11*a22)
// a = 1
float b = - m11 - m22;
float c = m11*m22 - m12*m21;
eigenval[0] = (-b - sqrtf(b*b - 4*c))/2;
eigenval[1] = (-b + sqrtf(b*b - 4*c))/2;
}
// calculate the norm of a 2D vector
__host__ __device__ float norm_2d(float v1, float v2) {
return sqrtf(v1*v1 + v2*v2);
}
// only valid for 2d matrices
__device__ void eig(float *eigenval, float *eigenvec, float m11, float m12, float m21, float m22) {
// eigenvalues
// implementing solution for: lambda**2 + lambda*(-m22-m11) + (a11*a22)
// a*lambda**2 + b*lambda + c = 0
float b = - m11 - m22;
float c = m11*m22 - m12*m21;
eigenval[0] = (-b - sqrtf(b*b - 4.f*c))/2.f;
eigenval[1] = (-b + sqrtf(b*b - 4.f*c))/2.f;
// eigenvectors
float a; float d;
for (int i = 0; i < 2; i++) {
// now abcd are the elements of the matrix (A-lambda*I)
a = m11 - eigenval[i];
b = m12;
c = m21;
d = m22 - eigenval[i];
if ( a*a > 0 ) {
if ( b*b > 0 ) {
eigenvec[0 + 2*i] = 1 / norm_2d(1.f, a/b);
eigenvec[1 + 2*i] = -(a/b) / norm_2d(1.f, a/b);
}
} else if ( c*c > 0 ) {
if ( d*d > 0 ) {
eigenvec[0 + 2*i] = 1 / norm_2d(1.f, c/d);
eigenvec[1 + 2*i] = -(c/d) / norm_2d(1.f, c/d);
}
} else {
printf("!!! eig ");
eigenvec[0 + 2*i] = 1;
eigenvec[1 + 2*i] = 1;
}
}
}
// only 2x2 matrices
// returns the G tensor for anisotropic diffusion
__device__ void G_tensor(float *G, float m11, float m12, float m21, float m22, float alpha, float C) {
// get eigenvalues
float eigenval[2];
float eigenvec[4];
eig(eigenval, eigenvec, m11, m12, m21, m22);
// mu factors
float mu[2];
mu[0] = alpha;
if ( (eigenval[0] - eigenval[1])*(eigenval[0] - eigenval[1]) < 1e-10 ) {
printf("!!! muuu ");
mu[1] = alpha;
}
else {
mu[1] = alpha + (1 - alpha)*exp( - C /( (eigenval[0] - eigenval[1])*(eigenval[0] - eigenval[1]) ) );
if (blockIdx.x == 0 && blockIdx.y ==0) printf("%4.2f", mu[1]);
}
// calculate G
// this is originating a nan
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
G[i + 2*j] = mu[0]*eigenvec[i]*eigenvec[j] + mu[1]*eigenvec[i + 2]*eigenvec[j + 2];
}
}
}
__global__ void calc_G_tensor(float *imgG, float *imgT11, float *imgT12, float *imgT22, int w, int h, int nc, float alpha, float C) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int idx;
float G[4];// = {0};
// for every channel
for (int c = 0; c < nc; c++) {
idx = x + y*w;
G_tensor(G, imgT11[idx], imgT12[idx], imgT12[idx], imgT22[idx], alpha, C);
// for each of the 4 tensor components
for (int i = 0; i < 4; i++) {
// if ( c==0 && x%10==0 && y%10==0 && i==0) printf("%4.2f \n", G[0]);
imgG[idx + c*w*h + i*nc*w*h] = G[i];
}
}
}
}
__global__ void tensor_scaling(float *imgV1, float *imgV2, float *imgG, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int idx;
float v1, v2;
// for every channel
for (int c = 0; c < nc; c++) {
idx = x + y*w;
v1 = imgV1[idx + c*w*h];
v2 = imgV2[idx + c*w*h];
imgV1[idx + c*w*h] = imgG[idx + c*w*h + 0*nc*w*h]*v1 + imgG[idx + c*w*h + 1*nc*w*h]*v2;
imgV2[idx + c*w*h] = imgG[idx + c*w*h + 2*nc*w*h]*v1 + imgG[idx + c*w*h + 3*nc*w*h]*v2;
}
}
}
__global__ void time_step(float *imgIn, float *imgGrad, float tau, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int idx;
// for every channel
for (int c = 0; c < nc; c++) {
idx = x + y*w + c*w*h;
imgIn[idx] += tau*imgGrad[idx];
}
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// load the value for sigma if "-sigma" is specified
float sigma = 0.5;
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << " with smoothing kernel size of 2*ceil(3*sigma) + 1" << endl;
int kernel_size_sigma = 2*ceil(3*sigma) + 1; // directly defined by sigma
// load the value for ro if "-ro" is specified
float ro = 3;
getParam("ro", ro, argc, argv);
cout << "ro: " << ro << " with averaging kernel size of 2*ceil(3*ro) + 1" << endl;
int kernel_size_ro = 2*ceil(3*sigma) + 1; // directly defined by sigma
// G diffusion tensor parameter
float alpha = 0.01;
getParam("alpha", alpha, argc, argv);
cout << "alpha: " << alpha << endl;
// G diffusion tensor parameter
float C = 0.000005;
getParam("C", C, argc, argv);
cout << "C: " << C << endl;
// number of time steps
int N = 10;
getParam("N", N, argc, argv);
cout << "N: " << N << endl;
// size of time steps
float tau = 0.25;
getParam("tau", tau, argc, argv);
cout << "tau: " << tau << endl;
cout << "tau x N = " << tau*N << ", if anisotropic is equivalent to sigma = " << sqrt(2*tau*N) << endl;
cout << "--------------" << endl; // save our eyes
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Set the OpenCV kernel display image
cv::Mat mKer(kernel_size_sigma, kernel_size_sigma, CV_32FC1);
// structure tensor grayscale Output image
cv::Mat mOutMii(h,w,CV_32FC1); // mOutMii will have just one channel
// debugging aux
cv::Mat mAux(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// allocate raw output array for the GPU
float *imgOutTii = new float[(size_t)w*h*mOutMii.channels()];
// auxiliar for debugging
float *imgAux = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
float *kernel_sigma = gaussian_kernel(kernel_size_sigma, sigma);
float *kernel_ro = gaussian_kernel(kernel_size_ro, ro);
// #ifndef CAMERA
// // CPU time
// Timer timer; timer.start();
// // ###
// // ###
// convolution(imgIn, imgOut, kernel, w, h, nc, kernel_size);
// // cout << "-----------" << endl;
// // for (int i = 0; i < kernel_size; i++) {
// // for (int j = 0; j < kernel_size; j++) {
// // cout << kernel[i + kernel_size *j] << endl;
// // }
// // }
// // ###
// // ###
// timer.end(); float t = timer.get(); // elapsed time in seconds
// cout << "time: " << t*1000 << " ms" << endl;
// #endif
// GPU time
Timer timerg; timerg.start();
// ###
// ###
// initialize device memory
float *d_kernel_sigma = NULL;
float *d_kernel_ro = NULL;
float *d_imgIn = NULL;
float *d_imgG = NULL;
float *d_imgTimeGrad = NULL;
float *d_imgV1 = NULL;
float *d_imgV2 = NULL;
float *d_imgS = NULL;
float *d_imgM11 = NULL;
float *d_imgM12 = NULL;
float *d_imgM22 = NULL;
float *d_imgT11 = NULL;
float *d_imgT12 = NULL;
float *d_imgT22 = NULL;
float *d_imgOut = NULL;
hipMalloc( &d_kernel_sigma, kernel_size_sigma*kernel_size_sigma*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_kernel_ro, kernel_size_ro*kernel_size_ro*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgIn, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgG, 4*w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgTimeGrad, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgV1, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgV2, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgS, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgM11, w*h*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgM12, w*h*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgM22, w*h*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgT11, w*h*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgT12, w*h*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgT22, w*h*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgOut, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgS, 0, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgM11, 0, w*h*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgM12, 0, w*h*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgM22, 0, w*h*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgT11, 0, w*h*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgT12, 0, w*h*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgT22, 0, w*h*sizeof(float) ); CUDA_CHECK;
hipMemset( d_imgOut, 0, w*h*nc*sizeof(float) ); CUDA_CHECK;
// copy image and kernel to device
hipMemcpy( d_kernel_sigma, kernel_sigma, kernel_size_sigma*kernel_size_sigma*sizeof(float), hipMemcpyHostToDevice ); CUDA_CHECK;
hipMemcpy( d_kernel_ro, kernel_ro, kernel_size_ro*kernel_size_ro*sizeof(float), hipMemcpyHostToDevice ); CUDA_CHECK;
hipMemcpy( d_imgIn, imgIn, w*h*nc*sizeof(float), hipMemcpyHostToDevice ); CUDA_CHECK;
// launch kernel
dim3 block = dim3(32,8,1);
dim3 grid = dim3( (w + block.x -1)/block.x, (h + block.y -1)/block.y, 1);
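// Anisotropic diffusion pipeline:
//   1. smooth the input with a Gaussian (sigma) into d_imgS
//   2. gradient of the smoothed image, then per-pixel products -> M11, M12, M22
//   3. smooth the M components with a Gaussian (ro) -> structure tensor T11, T12, T22
//   4. build the diffusion tensor G from the eigen-decomposition of T
//   5. iterate N explicit steps: gradient, scale by G, divergence, S += tau * divergence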
// G tensor is only calculated once
hipLaunchKernelGGL(( gpu_convolution) , dim3(grid),dim3(block), 0, 0, d_imgIn, d_imgS, d_kernel_sigma, w, h, nc, kernel_size_ro); CUDA_CHECK;
hipLaunchKernelGGL(( gradient) , dim3(grid),dim3(block), 0, 0, d_imgS, d_imgV1, d_imgV2, w, h, nc); CUDA_CHECK;
hipLaunchKernelGGL(( gpu_m_product) , dim3(grid),dim3(block), 0, 0, d_imgV1, d_imgV2, d_imgM11, d_imgM12, d_imgM22, w, h, nc); CUDA_CHECK;
hipLaunchKernelGGL(( gpu_convolution) , dim3(grid),dim3(block), 0, 0, d_imgM11, d_imgT11, d_kernel_ro, w, h, 1, kernel_size_ro); CUDA_CHECK;
hipLaunchKernelGGL(( gpu_convolution) , dim3(grid),dim3(block), 0, 0, d_imgM12, d_imgT12, d_kernel_ro, w, h, 1, kernel_size_ro); CUDA_CHECK;
hipLaunchKernelGGL(( gpu_convolution) , dim3(grid),dim3(block), 0, 0, d_imgM22, d_imgT22, d_kernel_ro, w, h, 1, kernel_size_ro); CUDA_CHECK;
hipLaunchKernelGGL(( calc_G_tensor) , dim3(grid),dim3(block), 0, 0, d_imgG, d_imgT11, d_imgT12, d_imgT22, w, h, nc, alpha, C);
// for a lot of time steps
for (int n = 0; n < N; n++) {
hipLaunchKernelGGL(( gradient) , dim3(grid),dim3(block), 0, 0, d_imgS, d_imgV1, d_imgV2, w, h, nc); CUDA_CHECK;
hipLaunchKernelGGL(( tensor_scaling) , dim3(grid),dim3(block), 0, 0, d_imgV1, d_imgV2, d_imgG, w, h, nc); CUDA_CHECK;
hipLaunchKernelGGL(( divergence_2d) , dim3(grid),dim3(block), 0, 0, d_imgV1, d_imgV2, d_imgTimeGrad, w, h, nc); CUDA_CHECK;
hipLaunchKernelGGL(( time_step) , dim3(grid),dim3(block), 0, 0, d_imgS, d_imgTimeGrad, tau, w, h, nc);
}
// ###
// ###
timerg.end(); float tg = timerg.get(); // elapsed time in seconds
#ifndef CAMERA
cout << "time: " << tg*1000 << " ms" << endl;
#endif
#ifndef CAMERA
double minval, maxval;
// // show structure tensor
// hipMemcpy( imgOutTii, d_imgT11, w*h*sizeof(float), hipMemcpyDeviceToHost ); CUDA_CHECK;
// convert_layered_to_mat(mOutMii, imgOutTii);
// cv::minMaxLoc(mOutMii, &minval, &maxval);
// showImage("GPU M11 (Structure Tensor)", mOutMii/maxval, 50 + w, 100);
//
// hipMemcpy( imgOutTii, d_imgT12, w*h*sizeof(float), hipMemcpyDeviceToHost ); CUDA_CHECK;
// convert_layered_to_mat(mOutMii, imgOutTii);
// cv::minMaxLoc(mOutMii, &minval, &maxval);
// showImage("GPU M12 (Structure Tensor)", mOutMii/maxval, 50 + 2*w, 100);
//
// hipMemcpy( imgOutTii, d_imgT22, w*h*sizeof(float), hipMemcpyDeviceToHost ); CUDA_CHECK;
// convert_layered_to_mat(mOutMii, imgOutTii);
// cv::minMaxLoc(mOutMii, &minval, &maxval);
// showImage("GPU M22 (Structure Tensor)", mOutMii/maxval, 50 + 3*w, 100);
#endif
// show input image
showImage("Input", mIn, 50, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
hipMemcpy( imgOut, d_imgS, nc*w*h*sizeof(float), hipMemcpyDeviceToHost ); CUDA_CHECK;
convert_layered_to_mat(mOut, imgOut);
showImage("GPU Result", mOut, 50 + w/2, 100 + h);
// ### Display your own output images here as needed
// show output image: first convert to interleaved opencv format from the layered raw array
hipMemcpy( imgAux, d_imgG, nc*w*h*sizeof(float), hipMemcpyDeviceToHost ); CUDA_CHECK;
convert_layered_to_mat(mAux, imgAux);
cv::minMaxLoc(mAux, &minval, &maxval);
showImage("GPU Aux image", mAux, 50 + w/2 + w, 100 + h);
// show kernel image
convert_layered_to_mat(mKer, kernel_sigma);
// double min, max;
// cv::minMaxLoc(mKer, &min, &max);
showImage("Kernel sigma", mKer/kernel_sigma[kernel_size_sigma*kernel_size_sigma/2], 50 - kernel_size_sigma, 100); // mKer is upscaled with its largest value for visualization
// free device memory
hipFree(d_kernel_sigma);
hipFree(d_kernel_ro);
hipFree(d_imgIn);
hipFree(d_imgTimeGrad);
hipFree(d_imgG);
hipFree(d_imgV1);
hipFree(d_imgV2);
hipFree(d_imgS);
hipFree(d_imgM11);
hipFree(d_imgM12);
hipFree(d_imgM22);
hipFree(d_imgT11);
hipFree(d_imgT12);
hipFree(d_imgT22);
hipFree(d_imgOut);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] imgOutTii;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
// clamp an index to the min and max values specified
int clamp(int idx, int minval, int maxval) {
// int clamped_idx = idx;
// if (idx < min) clamped_idx = min;
// else if (idx > max) clamped_idx = max;
// return clamped_idx;
return min(maxval, max(idx, minval));
}
| db7da2d0fb39f4c0871c1b96f621c138f2e2c46d.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
#include "helper.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// uncomment to use the camera
// #define CAMERA
// clamp an index to the min and max values specified
int clamp(int idx, int min, int max);
float* gaussian_kernel(int kernel_size, float sigma) {
float *kernel = new float[kernel_size * kernel_size];
float mid = (float)kernel_size/2.f; // coordinate value of the center of the kernel
float dist_sq;
float norm_sum = 0; // normalization factor
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
dist_sq = powf((float)i + 0.5 - mid, 2) + powf((float)j + 0.5 - mid, 2);
kernel[i + kernel_size * j] = expf( - dist_sq / (2*powf(sigma, 2)) );
norm_sum += kernel[i + kernel_size * j];
}
}
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
kernel[i + kernel_size * j] /= norm_sum;
// cout << kernel[i + kernel_size *j] << endl;
}
}
return kernel;
}
void convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks) {
int img_x, img_y;
// for every channel
for (int c = 0; c < nc; c++) {
// for every pixel in the image
for (int i = 0; i < w; i++){
for (int j = 0; j < h; j++) {
// for every pixel in the kernel
for (int k = 0; k < ks; k++) {
for (int l = 0; l < ks; l++) {
img_x = clamp(i + k - (ks/2 + 1), 0, w-1);
img_y = clamp(j + l - (ks/2 + 1), 0, h-1);
imgOut[i + w*j + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
}
}
__global__ void gpu_convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks) {
// indexes for the kernel
int img_x, img_y;
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; ++c) {
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
img_x = min(w-1, max(0, x + k - (ks/2 + 1)));
img_y = min(h-1, max(0, y + l - (ks/2 + 1)));
if (x < w && y < h) imgOut[x + w*y + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
__device__ int d_clamp(int ind, int minval, int maxval) {
return min(max(minval, ind), maxval);
}
__global__ void gradient(float *d_imgIn, float *d_imgGrad_x, float *d_imgGrad_y, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
int ind = x + w * y;
if ( x < w && y < h ) {
for (int c = 0; c < nc; c++) {
if (x + 1 < w) d_imgGrad_x[ind + c*w*h] = d_imgIn[ind + c*w*h + 1] - d_imgIn[ind + c*w*h]; // derivative along x
else d_imgGrad_x[ind + c*w*h] = 0;
if (y + 1 < h) d_imgGrad_y[ind + c*w*h] = d_imgIn[ind + c*w*h + w] - d_imgIn[ind + c*w*h]; // derivative along y
else d_imgGrad_y[ind + c*w*h] = 0;
}
}
}
__global__ void ri_gradient(float *d_imgIn, float *d_imgGrad_x, float *d_imgGrad_y, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
for (int c = 0; c < nc; c++) {
if (x < w && y < h) {
d_imgGrad_x[x + y*w + c*w*h] =
( 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
+ 10*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y,0,h-1) + c*w*h]
+ 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
- 10*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h] ) / 32.f; // derivative along x
d_imgGrad_y[x + y*w + c*w*h] =
( 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
+ 10*d_imgIn[d_clamp(x,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
+ 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y+1,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x-1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
- 10*d_imgIn[d_clamp(x,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h]
- 3*d_imgIn[d_clamp(x+1,0,w-1) + w*d_clamp(y-1,0,h-1) + c*w*h] ) / 32.f; // derivative along x
// d_imgGrad[x + y*w + (size_t)c*w*h + w*h*nc] = d_imgIn[ind + (size_t)c*w*h + w] - d_imgIn[ind + (size_t)c*w*h]; // derivative along y
}
}
}
__global__ void divergence_2d(float *d_imgV1, float *d_imgV2, float *d_div, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
float dx;
float dy;
size_t ind = x + w * y;
if ( x < w && y < h ) {
for (int c = 0; c < nc; c++) {
if (x > 0) dx = d_imgV1[ind + (size_t)c*w*h] - d_imgV1[ind + (size_t)c*w*h - 1]; // derivative along x
else dx = d_imgV1[ind + (size_t)c*w*h];
if (y > 0) dy = d_imgV2[ind + (size_t)c*w*h] - d_imgV2[ind + (size_t)c*w*h - w]; // derivative along y
else dy = d_imgV2[ind + (size_t)c*w*h];
d_div[ind + (size_t)c*w*h] = dx + dy;
}
}
}
__global__ void gpu_m_product(float *imgV1, float *imgV2, float *imgM11, float *imgM12, float *imgM22, int w, int h, int nc) {
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; c++) {
if (x < w && y < h) {
imgM11[x + y*w] += imgV1[x + y*w + c*w*h]*imgV1[x + y*w + c*w*h];
imgM12[x + y*w] += imgV1[x + y*w + c*w*h]*imgV2[x + y*w + c*w*h];
imgM22[x + y*w] += imgV2[x + y*w + c*w*h]*imgV2[x + y*w + c*w*h];
}
}
}
__device__ void eigval(float *eigenval, float m11, float m12, float m21, float m22) {
// implementing solution for: lambda**2 + lambda*(-m22-m11) + (a11*a22)
// a = 1
float b = - m11 - m22;
float c = m11*m22 - m12*m21;
eigenval[0] = (-b - sqrtf(b*b - 4*c))/2;
eigenval[1] = (-b + sqrtf(b*b - 4*c))/2;
}
// calculate the norm of a 2D vector
__host__ __device__ float norm_2d(float v1, float v2) {
return sqrtf(v1*v1 + v2*v2);
}
// only valid for 2d matrices
__device__ void eig(float *eigenval, float *eigenvec, float m11, float m12, float m21, float m22) {
// eigenvalues
// implementing solution for: lambda**2 + lambda*(-m22-m11) + (a11*a22)
// a*lambda**2 + b*lambda + c = 0
float b = - m11 - m22;
float c = m11*m22 - m12*m21;
eigenval[0] = (-b - sqrtf(b*b - 4.f*c))/2.f;
eigenval[1] = (-b + sqrtf(b*b - 4.f*c))/2.f;
// eigenvectors
float a; float d;
for (int i = 0; i < 2; i++) {
// now abcd are the elements of the matrix (A-lambda*I)
a = m11 - eigenval[i];
b = m12;
c = m21;
d = m22 - eigenval[i];
if ( a*a > 0 ) {
if ( b*b > 0 ) {
eigenvec[0 + 2*i] = 1 / norm_2d(1.f, a/b);
eigenvec[1 + 2*i] = -(a/b) / norm_2d(1.f, a/b);
}
} else if ( c*c > 0 ) {
if ( d*d > 0 ) {
eigenvec[0 + 2*i] = 1 / norm_2d(1.f, c/d);
eigenvec[1 + 2*i] = -(c/d) / norm_2d(1.f, c/d);
}
} else {
printf("!!! eig ");
eigenvec[0 + 2*i] = 1;
eigenvec[1 + 2*i] = 1;
}
}
}
// only 2x2 matrices
// returns the G tensor for anisotropic diffusion
__device__ void G_tensor(float *G, float m11, float m12, float m21, float m22, float alpha, float C) {
// get eigenvalues
float eigenval[2];
float eigenvec[4];
eig(eigenval, eigenvec, m11, m12, m21, m22);
// mu factors
float mu[2];
mu[0] = alpha;
if ( (eigenval[0] - eigenval[1])*(eigenval[0] - eigenval[1]) < 1e-10 ) {
printf("!!! muuu ");
mu[1] = alpha;
}
else {
mu[1] = alpha + (1 - alpha)*exp( - C /( (eigenval[0] - eigenval[1])*(eigenval[0] - eigenval[1]) ) );
if (blockIdx.x == 0 && blockIdx.y ==0) printf("%4.2f", mu[1]);
}
// calculate G
// this is originating a nan
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
G[i + 2*j] = mu[0]*eigenvec[i]*eigenvec[j] + mu[1]*eigenvec[i + 2]*eigenvec[j + 2];
}
}
}
__global__ void calc_G_tensor(float *imgG, float *imgT11, float *imgT12, float *imgT22, int w, int h, int nc, float alpha, float C) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int idx;
float G[4];// = {0};
// for every channel
for (int c = 0; c < nc; c++) {
idx = x + y*w;
G_tensor(G, imgT11[idx], imgT12[idx], imgT12[idx], imgT22[idx], alpha, C);
// for each of the 4 tensor components
for (int i = 0; i < 4; i++) {
// if ( c==0 && x%10==0 && y%10==0 && i==0) printf("%4.2f \n", G[0]);
imgG[idx + c*w*h + i*nc*w*h] = G[i];
}
}
}
}
__global__ void tensor_scaling(float *imgV1, float *imgV2, float *imgG, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int idx;
float v1, v2;
// for every channel
for (int c = 0; c < nc; c++) {
idx = x + y*w;
v1 = imgV1[idx + c*w*h];
v2 = imgV2[idx + c*w*h];
imgV1[idx + c*w*h] = imgG[idx + c*w*h + 0*nc*w*h]*v1 + imgG[idx + c*w*h + 1*nc*w*h]*v2;
imgV2[idx + c*w*h] = imgG[idx + c*w*h + 2*nc*w*h]*v1 + imgG[idx + c*w*h + 3*nc*w*h]*v2;
}
}
}
__global__ void time_step(float *imgIn, float *imgGrad, float tau, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int idx;
// for every channel
for (int c = 0; c < nc; c++) {
idx = x + y*w + c*w*h;
imgIn[idx] += tau*imgGrad[idx];
}
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// load the value for sigma if "-sigma" is specified
float sigma = 0.5;
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << " with smoothing kernel size of 2*ceil(3*sigma) + 1" << endl;
int kernel_size_sigma = 2*ceil(3*sigma) + 1; // directly defined by sigma
// load the value for ro if "-ro" is specified
float ro = 3;
getParam("ro", ro, argc, argv);
cout << "ro: " << ro << " with averaging kernel size of 2*ceil(3*ro) + 1" << endl;
int kernel_size_ro = 2*ceil(3*sigma) + 1; // directly defined by sigma
// G diffusion tensor parameter
float alpha = 0.01;
getParam("alpha", alpha, argc, argv);
cout << "alpha: " << alpha << endl;
// G diffusion tensor parameter
float C = 0.000005;
getParam("C", C, argc, argv);
cout << "C: " << C << endl;
// number of time steps
int N = 10;
getParam("N", N, argc, argv);
cout << "N: " << N << endl;
// size of time steps
float tau = 0.25;
getParam("tau", tau, argc, argv);
cout << "tau: " << tau << endl;
cout << "tau x N = " << tau*N << ", if anisotropic is equivalent to sigma = " << sqrt(2*tau*N) << endl;
cout << "--------------" << endl; // save our eyes
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Set the OpenCV kernel display image
cv::Mat mKer(kernel_size_sigma, kernel_size_sigma, CV_32FC1);
// structure tensor grayscale Output image
cv::Mat mOutMii(h,w,CV_32FC1); // mOutMii will have just one channel
// debugging aux
cv::Mat mAux(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// allocate raw output array for the GPU
float *imgOutTii = new float[(size_t)w*h*mOutMii.channels()];
// auxiliar for debugging
float *imgAux = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
float *kernel_sigma = gaussian_kernel(kernel_size_sigma, sigma);
float *kernel_ro = gaussian_kernel(kernel_size_ro, ro);
// #ifndef CAMERA
// // CPU time
// Timer timer; timer.start();
// // ###
// // ###
// convolution(imgIn, imgOut, kernel, w, h, nc, kernel_size);
// // cout << "-----------" << endl;
// // for (int i = 0; i < kernel_size; i++) {
// // for (int j = 0; j < kernel_size; j++) {
// // cout << kernel[i + kernel_size *j] << endl;
// // }
// // }
// // ###
// // ###
// timer.end(); float t = timer.get(); // elapsed time in seconds
// cout << "time: " << t*1000 << " ms" << endl;
// #endif
// GPU time
Timer timerg; timerg.start();
// ###
// ###
// initialize device memory
float *d_kernel_sigma = NULL;
float *d_kernel_ro = NULL;
float *d_imgIn = NULL;
float *d_imgG = NULL;
float *d_imgTimeGrad = NULL;
float *d_imgV1 = NULL;
float *d_imgV2 = NULL;
float *d_imgS = NULL;
float *d_imgM11 = NULL;
float *d_imgM12 = NULL;
float *d_imgM22 = NULL;
float *d_imgT11 = NULL;
float *d_imgT12 = NULL;
float *d_imgT22 = NULL;
float *d_imgOut = NULL;
cudaMalloc( &d_kernel_sigma, kernel_size_sigma*kernel_size_sigma*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_kernel_ro, kernel_size_ro*kernel_size_ro*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgIn, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgG, 4*w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgTimeGrad, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgV1, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgV2, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgS, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgM11, w*h*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgM12, w*h*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgM22, w*h*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgT11, w*h*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgT12, w*h*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgT22, w*h*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgOut, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgS, 0, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgM11, 0, w*h*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgM12, 0, w*h*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgM22, 0, w*h*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgT11, 0, w*h*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgT12, 0, w*h*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgT22, 0, w*h*sizeof(float) ); CUDA_CHECK;
cudaMemset( d_imgOut, 0, w*h*nc*sizeof(float) ); CUDA_CHECK;
// copy image and kernel to device
cudaMemcpy( d_kernel_sigma, kernel_sigma, kernel_size_sigma*kernel_size_sigma*sizeof(float), cudaMemcpyHostToDevice ); CUDA_CHECK;
cudaMemcpy( d_kernel_ro, kernel_ro, kernel_size_ro*kernel_size_ro*sizeof(float), cudaMemcpyHostToDevice ); CUDA_CHECK;
cudaMemcpy( d_imgIn, imgIn, w*h*nc*sizeof(float), cudaMemcpyHostToDevice ); CUDA_CHECK;
// launch kernel
dim3 block = dim3(32,8,1);
dim3 grid = dim3( (w + block.x -1)/block.x, (h + block.y -1)/block.y, 1);
// G tensor is only calculated once
gpu_convolution <<<grid,block>>> (d_imgIn, d_imgS, d_kernel_sigma, w, h, nc, kernel_size_ro); CUDA_CHECK;
gradient <<<grid,block>>> (d_imgS, d_imgV1, d_imgV2, w, h, nc); CUDA_CHECK;
gpu_m_product <<<grid,block>>> (d_imgV1, d_imgV2, d_imgM11, d_imgM12, d_imgM22, w, h, nc); CUDA_CHECK;
gpu_convolution <<<grid,block>>> (d_imgM11, d_imgT11, d_kernel_ro, w, h, 1, kernel_size_ro); CUDA_CHECK;
gpu_convolution <<<grid,block>>> (d_imgM12, d_imgT12, d_kernel_ro, w, h, 1, kernel_size_ro); CUDA_CHECK;
gpu_convolution <<<grid,block>>> (d_imgM22, d_imgT22, d_kernel_ro, w, h, 1, kernel_size_ro); CUDA_CHECK;
calc_G_tensor <<<grid,block>>> (d_imgG, d_imgT11, d_imgT12, d_imgT22, w, h, nc, alpha, C);
// for a lot of time steps
for (int n = 0; n < N; n++) {
gradient <<<grid,block>>> (d_imgS, d_imgV1, d_imgV2, w, h, nc); CUDA_CHECK;
tensor_scaling <<<grid,block>>> (d_imgV1, d_imgV2, d_imgG, w, h, nc); CUDA_CHECK;
divergence_2d <<<grid,block>>> (d_imgV1, d_imgV2, d_imgTimeGrad, w, h, nc); CUDA_CHECK;
time_step <<<grid,block>>> (d_imgS, d_imgTimeGrad, tau, w, h, nc);
}
// ###
// ###
timerg.end(); float tg = timerg.get(); // elapsed time in seconds
#ifndef CAMERA
cout << "time: " << tg*1000 << " ms" << endl;
#endif
#ifndef CAMERA
double minval, maxval;
// // show structure tensor
// cudaMemcpy( imgOutTii, d_imgT11, w*h*sizeof(float), cudaMemcpyDeviceToHost ); CUDA_CHECK;
// convert_layered_to_mat(mOutMii, imgOutTii);
// cv::minMaxLoc(mOutMii, &minval, &maxval);
// showImage("GPU M11 (Structure Tensor)", mOutMii/maxval, 50 + w, 100);
//
// cudaMemcpy( imgOutTii, d_imgT12, w*h*sizeof(float), cudaMemcpyDeviceToHost ); CUDA_CHECK;
// convert_layered_to_mat(mOutMii, imgOutTii);
// cv::minMaxLoc(mOutMii, &minval, &maxval);
// showImage("GPU M12 (Structure Tensor)", mOutMii/maxval, 50 + 2*w, 100);
//
// cudaMemcpy( imgOutTii, d_imgT22, w*h*sizeof(float), cudaMemcpyDeviceToHost ); CUDA_CHECK;
// convert_layered_to_mat(mOutMii, imgOutTii);
// cv::minMaxLoc(mOutMii, &minval, &maxval);
// showImage("GPU M22 (Structure Tensor)", mOutMii/maxval, 50 + 3*w, 100);
#endif
// show input image
showImage("Input", mIn, 50, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
cudaMemcpy( imgOut, d_imgS, nc*w*h*sizeof(float), cudaMemcpyDeviceToHost ); CUDA_CHECK;
convert_layered_to_mat(mOut, imgOut);
showImage("GPU Result", mOut, 50 + w/2, 100 + h);
// ### Display your own output images here as needed
// show output image: first convert to interleaved opencv format from the layered raw array
cudaMemcpy( imgAux, d_imgG, nc*w*h*sizeof(float), cudaMemcpyDeviceToHost ); CUDA_CHECK;
convert_layered_to_mat(mAux, imgAux);
cv::minMaxLoc(mAux, &minval, &maxval);
showImage("GPU Aux image", mAux, 50 + w/2 + w, 100 + h);
// show kernel image
convert_layered_to_mat(mKer, kernel_sigma);
// double min, max;
// cv::minMaxLoc(mKer, &min, &max);
showImage("Kernel sigma", mKer/kernel_sigma[kernel_size_sigma*kernel_size_sigma/2], 50 - kernel_size_sigma, 100); // mKer is upscaled with its largest value for visualization
// free device memory
cudaFree(d_kernel_sigma);
cudaFree(d_kernel_ro);
cudaFree(d_imgIn);
cudaFree(d_imgTimeGrad);
cudaFree(d_imgG);
cudaFree(d_imgV1);
cudaFree(d_imgV2);
cudaFree(d_imgS);
cudaFree(d_imgM11);
cudaFree(d_imgM12);
cudaFree(d_imgM22);
cudaFree(d_imgT11);
cudaFree(d_imgT12);
cudaFree(d_imgT22);
cudaFree(d_imgOut);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] imgOutTii;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
// clamp an index to the min and max values specified
int clamp(int idx, int minval, int maxval) {
// int clamped_idx = idx;
// if (idx < min) clamped_idx = min;
// else if (idx > max) clamped_idx = max;
// return clamped_idx;
return min(maxval, max(idx, minval));
}
|
ac64d69d7ec859474f7d00bc436066210b30cfa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "himenocuda.cuh"
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
//#define _DEBUG
using namespace std;
void CHECK_CUDAERR(int line, hipError_t ce)
{
if (ce != hipSuccess){
cout << "Error: line " << line << " "<< hipGetErrorString(ce) << endl;
}
}
PRECISION * fa_d, * fb_d, * fc_d,
* fp_d, * fwrk1_d, * fwrk2_d, * fbnd_d,
**** a_d, **** b_d, **** c_d,
*** p_d, *** wrk1_d, *** wrk2_d, *** bnd_d,
* gosa_d,
**** a_h, **** b_h, **** c_h,
*** p_h, *** wrk1_h, *** wrk2_h, *** bnd_h,
* gosa_h;
PRECISION **** fake_h, **** fake_d, * ffake_d;
typedef void * PtrObj;
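// Himeno benchmark Jacobi kernel: a 19-point stencil over the pressure field p
// with coefficient arrays a, b, c, bnd and wrk1. Each thread walks its cells
// through strided loops in i, j and k, accumulates the squared residual into
// *gosa with atomicAdd, and writes the relaxed value (omega = 0.8) to wrk2.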
__global__ void bmtJacobiKernel(
PRECISION **** a, PRECISION **** b, PRECISION **** c,
PRECISION *** p, PRECISION *** wrk1, PRECISION *** wrk2,
PRECISION *** bnd, PRECISION * gosa,
int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
PRECISION s0, ss, omega = 0.8;
// __shared__ PRECISION wgosa;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
// int tid = (threadIdx.z * (blockDim.y * blockDim.x)) +
// (threadIdx.y * blockDim.x) +
// threadIdx.x;
// if (tid == 0)
// wgosa = 0.0;
// __syncthreads();
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (int xxx=0; xxx<8; xxx++)
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for(int yyy=0; yyy<8; yyy++)
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (int zzz=0; zzz<8; zzz++)
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
s0 = a[0][i][j][k] * p[i+1][j ][k ]
+ a[1][i][j][k] * p[i ][j+1][k ]
+ a[2][i][j][k] * p[i ][j ][k+1]
+ b[0][i][j][k] * ( p[i+1][j+1][k ] - p[i+1][j-1][k ]
- p[i-1][j+1][k ] + p[i-1][j-1][k ] )
+ b[1][i][j][k] * ( p[i ][j+1][k+1] - p[i ][j-1][k+1]
- p[i ][j+1][k-1] + p[i ][j-1][k-1] )
+ b[2][i][j][k] * ( p[i+1][j ][k+1] - p[i-1][j ][k+1]
- p[i+1][j ][k-1] + p[i-1][j ][k-1] )
+ c[0][i][j][k] * p[i-1][j ][k ]
+ c[1][i][j][k] * p[i ][j-1][k ]
+ c[2][i][j][k] * p[i ][j ][k-1]
+ wrk1[i][j][k];
ss = ( s0 * a[3][i][j][k] - p[i][j][k] ) * bnd[i][j][k];
atomicAdd(gosa, ss*ss);
wrk2[i][j][k] = p[i][j][k] + omega * ss;
}
}
}
// __syncthreads();
/*
for (i=1;i<imax-1;++i) {
for (j=1;j<jmax-1;++j) {
for (k=1;k<kmax-1;++k) {
*/
#if 0
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
#endif
#if 0
if (tid == 0) {
printf("gosa: %f\n", wgosa);
atomicAdd(gosa, wgosa);
}
#endif
}
__global__ void bmtUpdatePressureKernel(
PRECISION *** p, PRECISION *** wrk2, int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
}
#ifdef _DEBUG
__global__ void DebugKernel(PRECISION **** a) {
a[0][1][2][3] = 100;
a[3][0][2][1] = 200;
}
#endif
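// The two helpers below build a device-resident pointer tree over a single
// flat allocation so kernels can index it as a multi-dimensional array
// (e.g. a[l][i][j][k]). The recursion fills every level of the tree with
// device addresses (the innermost level points into the flat value array),
// and the finished pointer table is uploaded to the GPU with one hipMemcpy.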
int bmtAssign_MultiDimension_Space_Rec(
PtrObj * ptrobj, PtrObj * ptrobj_d, PRECISION * flat_d, int dim,
int mdim, int * adim, int poffset, int doffset,
int * blocks) {
#ifdef _DEBUG
#define INDENT for (int i=0;i<dim;i++) cout << "\t";
#endif
int iIdx, offset = doffset;
if (dim < mdim - 2) {
int nloffset = 1;
for (int idx=0;idx<=dim;idx++)
nloffset *= adim[idx];
nloffset += doffset;
for (iIdx=0;iIdx<adim[dim];iIdx++) {
blocks[dim] = iIdx;
int loffset = 0;
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
loffset += adim[dim] * b;
}
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset + loffset<< endl;
#endif
bmtAssign_MultiDimension_Space_Rec(
ptrobj, ptrobj_d, flat_d, dim + 1,
mdim, adim, offset + loffset, nloffset, blocks);
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset + loffset;
/*reinterpret_cast<PtrObj>(offset+loffset);*/
offset++;
}
}
else {
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
offset += adim[dim] * b;
}
for (iIdx=0;iIdx<adim[dim];iIdx++) {
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset << endl;
#endif
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset;
/*reinterpret_cast<PtrObj>(offset);*/
int foffset = 0;
for (int i=0;i<mdim-1;i++) {
int ele = 1;
for (int j=i+1;j<mdim;j++)
ele *= adim[j];
if (i < mdim - 2)
foffset += blocks[i] * ele;
else
foffset += iIdx * ele;
}
ptrobj[offset] = flat_d + foffset;
/*reinterpret_cast<PtrObj>(foffset);*/
offset++;
}
}
return 0;
}
int bmtCreateDevice_MultiDimension_Space(
PRECISION ** m_h, PRECISION ** m_d, PRECISION * fm_d,
int dim, int * adim) {
int iIdx, jIdx, cnt = 1;
//Determine the number of blocks for storing pointer objects
for (iIdx=0;iIdx<dim-1;iIdx++)
cnt *= adim[iIdx];
for (iIdx=dim-3;iIdx>=0;iIdx--) {
int tcnt = 1;
for (jIdx=iIdx;jIdx>=0;jIdx--)
tcnt *= adim[jIdx];
cnt += tcnt;
}
#ifdef _DEBUG
cout << "***" << cnt << endl;
#endif
//Allocate blocks for storing pointer objects on both host and device
PtrObj * tm_h, * tm_d;
tm_h = new PtrObj[cnt];
CHECK_CUDAERR( __LINE__, hipMalloc(&tm_d, cnt * sizeof(PtrObj)));
//Assign pointer values to blocks
int blocks[4];
bmtAssign_MultiDimension_Space_Rec(
tm_h, tm_d, fm_d, 0, dim,
adim, -1, 0, blocks);
  //Transfer the created multidimensional array to device
CHECK_CUDAERR( __LINE__, hipMemcpy(tm_d, tm_h,
cnt * sizeof(PtrObj), hipMemcpyHostToDevice));
*m_h = reinterpret_cast<PRECISION *>(tm_h);
*m_d = reinterpret_cast<PRECISION *>(tm_d);
#ifdef _DEBUG
cout << endl << "Origin:\t" << tm_d << endl;
for (iIdx=0;iIdx<cnt;iIdx++)
cout << iIdx << ":\t" << tm_h[iIdx] << endl;
#endif
return 0;
}
hipError_t bmtInitDeviceMemory(
Matrix * pa, Matrix * pb, Matrix * pc,
Matrix * pp, Matrix * pwrk1, Matrix * pwrk2,
Matrix * pbnd, int peid, BMT_Config config, bool copy
/* PRECISION ** fa_d, PRECISION ** fb_d, PRECISION ** fc_d, PRECISION ** fp_d, PRECISION ** fwrk1_d,
PRECISION ** fwrk2_d, PRECISION ** fbnd_d, PRECISION ***** a_d, PRECISION ***** b_d, PRECISION ***** c_d,
PRECISION **** p_d, PRECISION **** wrk1_d, PRECISION **** wrk2_d, PRECISION **** bnd_d,
PRECISION ** gosa_d, PRECISION ***** a_h, PRECISION ***** b_h, PRECISION ***** c_h,
PRECISION **** p_h, PRECISION **** wrk1_h, PRECISION **** wrk2_h, PRECISION **** bnd_h,
PRECISION ** gosa_h*/) {
int devCnt = 0;
CHECK_CUDAERR( __LINE__, hipGetDeviceCount(&devCnt));
CHECK_CUDAERR( __LINE__, hipSetDevice(peid % devCnt));
// cout << "\t\t[" << peid << "]" << "cudasetdevice: " << peid % devCnt << endl;
gosa_h = new PRECISION[1];
CHECK_CUDAERR( __LINE__, hipMalloc(&gosa_d, sizeof(PRECISION)));
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
CHECK_CUDAERR( __LINE__, hipMalloc(&fa_d, 4 * memreq_3d));
CHECK_CUDAERR( __LINE__, hipMalloc(&fb_d, 3 * memreq_3d));
CHECK_CUDAERR( __LINE__, hipMalloc(&fc_d, 3 * memreq_3d));
CHECK_CUDAERR( __LINE__, hipMalloc(&fp_d, memreq_3d));
CHECK_CUDAERR( __LINE__, hipMalloc(&fwrk1_d, memreq_3d));
CHECK_CUDAERR( __LINE__, hipMalloc(&fwrk2_d, memreq_3d));
CHECK_CUDAERR( __LINE__, hipMalloc(&fbnd_d, memreq_3d));
CHECK_CUDAERR( __LINE__, hipMemcpy(fa_d, pa->mpVal,
4 * memreq_3d, hipMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, hipMemcpy(fb_d, pb->mpVal,
3 * memreq_3d, hipMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, hipMemcpy(fc_d, pc->mpVal,
3 * memreq_3d, hipMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, hipMemcpy(fp_d, pp->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, hipMemcpy(fwrk1_d, pwrk1->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, hipMemcpy(fwrk2_d, pwrk2->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, hipMemcpy(fbnd_d, pbnd->mpVal,
memreq_3d, hipMemcpyHostToDevice));
#ifndef _DEBUG
//Construct multi-dimensional space for matrices
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&a_h),
reinterpret_cast<PRECISION **>(&a_d),
fa_d, pa->mDim, pa->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&b_h),
reinterpret_cast<PRECISION **>(&b_d),
fb_d, pb->mDim, pb->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&c_h),
reinterpret_cast<PRECISION **>(&c_d),
fc_d, pc->mDim, pc->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&p_h),
reinterpret_cast<PRECISION **>(&p_d),
fp_d, pp->mDim, pp->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk1_h),
reinterpret_cast<PRECISION **>(&wrk1_d),
fwrk1_d, pwrk1->mDim, pwrk1->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk2_h),
reinterpret_cast<PRECISION **>(&wrk2_d),
fwrk2_d, pwrk2->mDim, pwrk2->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&bnd_h),
reinterpret_cast<PRECISION **>(&bnd_d),
fbnd_d, pbnd->mDim, pbnd->mpDim);
#else
Matrix * pfake;
pfake = new Matrix(4,2,3,4);
CHECK_CUDAERR( __LINE__, hipMalloc(&ffake_d, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
CHECK_CUDAERR( __LINE__, hipMemset(ffake_d, 0, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&fake_h),
reinterpret_cast<PRECISION **>(&fake_d),
ffake_d, pfake->mDim, pfake->mpDim);
hipLaunchKernelGGL(( DebugKernel) , dim3(256), dim3(512), 0, 0, fake_d);
CHECK_CUDAERR( __LINE__, hipDeviceSynchronize());
CHECK_CUDAERR( __LINE__, hipMemcpy(pfake->mpVal, ffake_d, 4 * 2 * 3 * 4 *
sizeof(PRECISION), hipMemcpyDeviceToHost));
for (int i=0;i<4;i++) {
cout << "[0, " << i << "]" << endl;
for (int j=0;j<2;j++) {
cout << "\t[1, " << j << "]" << endl;
for (int k=0;k<3;k++) {
cout << "\t\t[2, " << k << "]" << endl;
cout << "\t\t";
for (int l=0;l<4;l++) {
cout << pfake->mpVal[(i*24)+(j*12)+(k*4)+l] << "\t";
}
cout << endl;
}
cout << endl;
}
}
#endif
return hipSuccess;
}
hipError_t dealloc() {
/* delete[] gosa_h;
delete[] a_h;
delete[] b_h;*/
CHECK_CUDAERR( __LINE__, hipFree(gosa_d));
CHECK_CUDAERR( __LINE__, hipFree(fa_d));
CHECK_CUDAERR( __LINE__, hipFree(fb_d));
CHECK_CUDAERR( __LINE__, hipFree(fc_d));
CHECK_CUDAERR( __LINE__, hipFree(fp_d));
CHECK_CUDAERR( __LINE__, hipFree(fwrk1_d));
CHECK_CUDAERR( __LINE__, hipFree(fwrk2_d));
CHECK_CUDAERR( __LINE__, hipFree(fbnd_d));
CHECK_CUDAERR( __LINE__, hipFree(ffake_d));
  return hipSuccess;
}
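// bmtCudaJacobi: runs one Jacobi sweep plus the pressure update on the GPU,
// copies gosa and the pressure field back to the host, then releases the
// device buffers via dealloc().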
hipError_t bmtCudaJacobi(PRECISION * gosa, Matrix * pp,
int imax, int jmax, int kmax,
BMT_Config config, int peid
/* PRECISION * fa_d, PRECISION * fb_d, PRECISION * fc_d, PRECISION * fp_d, PRECISION * fwrk1_d,
PRECISION* fwrk2_d, PRECISION* fbnd_d, PRECISION **** a_d, PRECISION **** b_d, PRECISION **** c_d,
PRECISION*** p_d, PRECISION *** wrk1_d, PRECISION *** wrk2_d, PRECISION *** bnd_d,
PRECISION * gosa_d, PRECISION **** a_h, PRECISION **** b_h, PRECISION **** c_h,
PRECISION *** p_h, PRECISION *** wrk1_h, PRECISION *** wrk2_h, PRECISION *** bnd_h,
PRECISION * gosa_h*/) {
int devCnt = 0;
CHECK_CUDAERR( __LINE__, hipGetDeviceCount(&devCnt));
CHECK_CUDAERR( __LINE__, hipSetDevice(peid % devCnt));
char hostname[128];
gethostname(hostname, 128);
cout << "\t\t[" << peid << "]: hipSetDevice " <<
peid % devCnt << " on [" << hostname << "]"<< endl;
dim3 grid(16, 16, 1);
dim3 block(1, 1, 64);
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
// for (int idx=0;idx<nn;idx++) {
//Jacobi
struct timeval tb, te;
// cout << "Launching" << endl;
gettimeofday(&tb, NULL);
CHECK_CUDAERR( __LINE__, hipMemset(gosa_d, 0, sizeof(PRECISION)));
hipLaunchKernelGGL(( bmtJacobiKernel) , dim3(grid), dim3(block), 0, 0,
a_d, b_d, c_d, p_d, wrk1_d, wrk2_d, bnd_d, gosa_d,
imax, jmax, kmax);
CHECK_CUDAERR( __LINE__, hipDeviceSynchronize());
//Update Pressure Matrix
hipLaunchKernelGGL(( bmtUpdatePressureKernel) , dim3(grid), dim3(block), 0, 0,
p_d, wrk2_d,
imax, jmax, kmax);
CHECK_CUDAERR( __LINE__, hipDeviceSynchronize());
gettimeofday(&te, NULL);
cout << "\t***Kernel: " << ((te.tv_usec - tb.tv_usec) * 1e-6 + ((double)te.tv_sec - (double)tb.tv_sec)) << endl;
CHECK_CUDAERR( __LINE__, hipMemcpy(gosa_h, gosa_d,
sizeof(PRECISION), hipMemcpyDeviceToHost));
CHECK_CUDAERR( __LINE__, hipMemcpy(pp->mpVal, fp_d,
memreq_3d, hipMemcpyDeviceToHost));
*gosa = *gosa_h;
dealloc();
// cout << idx << ": " << *gosa_h << endl;
// }
// CHECK_CUDAERR( hipMemcpy(gosa_h, gosa_d,
// sizeof(PRECISION), hipMemcpyDeviceToHost));
return hipSuccess;
}
| ac64d69d7ec859474f7d00bc436066210b30cfa4.cu | #include "himenocuda.cuh"
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
//#define _DEBUG
using namespace std;
void CHECK_CUDAERR(int line, cudaError_t ce)
{
if (ce != cudaSuccess){
cout << "Error: line " << line << " "<< cudaGetErrorString(ce) << endl;
}
}
PRECISION * fa_d, * fb_d, * fc_d,
* fp_d, * fwrk1_d, * fwrk2_d, * fbnd_d,
**** a_d, **** b_d, **** c_d,
*** p_d, *** wrk1_d, *** wrk2_d, *** bnd_d,
* gosa_d,
**** a_h, **** b_h, **** c_h,
*** p_h, *** wrk1_h, *** wrk2_h, *** bnd_h,
* gosa_h;
PRECISION **** fake_h, **** fake_d, * ffake_d;
typedef void * PtrObj;
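// bmtJacobiKernel: one Jacobi sweep of the Himeno 19-point stencil.
// Each thread strides over interior (i,j,k) cells and computes
//   s0 = weighted sum of neighbouring pressures (a, b, c coefficients) + wrk1
//   ss = (s0 * a[3] - p) * bnd
// then accumulates ss*ss into *gosa with atomicAdd and stores the relaxed
// value p + omega*ss into wrk2. Copying wrk2 back into p is left to
// bmtUpdatePressureKernel below.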
__global__ void bmtJacobiKernel(
PRECISION **** a, PRECISION **** b, PRECISION **** c,
PRECISION *** p, PRECISION *** wrk1, PRECISION *** wrk2,
PRECISION *** bnd, PRECISION * gosa,
int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
PRECISION s0, ss, omega = 0.8;
// __shared__ PRECISION wgosa;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
// int tid = (threadIdx.z * (blockDim.y * blockDim.x)) +
// (threadIdx.y * blockDim.x) +
// threadIdx.x;
// if (tid == 0)
// wgosa = 0.0;
// __syncthreads();
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (int xxx=0; xxx<8; xxx++)
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for(int yyy=0; yyy<8; yyy++)
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (int zzz=0; zzz<8; zzz++)
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
s0 = a[0][i][j][k] * p[i+1][j ][k ]
+ a[1][i][j][k] * p[i ][j+1][k ]
+ a[2][i][j][k] * p[i ][j ][k+1]
+ b[0][i][j][k] * ( p[i+1][j+1][k ] - p[i+1][j-1][k ]
- p[i-1][j+1][k ] + p[i-1][j-1][k ] )
+ b[1][i][j][k] * ( p[i ][j+1][k+1] - p[i ][j-1][k+1]
- p[i ][j+1][k-1] + p[i ][j-1][k-1] )
+ b[2][i][j][k] * ( p[i+1][j ][k+1] - p[i-1][j ][k+1]
- p[i+1][j ][k-1] + p[i-1][j ][k-1] )
+ c[0][i][j][k] * p[i-1][j ][k ]
+ c[1][i][j][k] * p[i ][j-1][k ]
+ c[2][i][j][k] * p[i ][j ][k-1]
+ wrk1[i][j][k];
ss = ( s0 * a[3][i][j][k] - p[i][j][k] ) * bnd[i][j][k];
atomicAdd(gosa, ss*ss);
wrk2[i][j][k] = p[i][j][k] + omega * ss;
}
}
}
// __syncthreads();
/*
for (i=1;i<imax-1;++i) {
for (j=1;j<jmax-1;++j) {
for (k=1;k<kmax-1;++k) {
*/
#if 0
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
#endif
#if 0
if (tid == 0) {
printf("gosa: %f\n", wgosa);
atomicAdd(gosa, wgosa);
}
#endif
}
__global__ void bmtUpdatePressureKernel(
PRECISION *** p, PRECISION *** wrk2, int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
}
#ifdef _DEBUG
__global__ void DebugKernel(PRECISION **** a) {
a[0][1][2][3] = 100;
a[3][0][2][1] = 200;
}
#endif
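// The two helpers below build a device-resident pointer tree over a single
// flat allocation so kernels can index it as a multi-dimensional array
// (e.g. a[l][i][j][k]). The recursion fills every level of the tree with
// device addresses (the innermost level points into the flat value array),
// and the finished pointer table is uploaded to the GPU with one cudaMemcpy.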
int bmtAssign_MultiDimension_Space_Rec(
PtrObj * ptrobj, PtrObj * ptrobj_d, PRECISION * flat_d, int dim,
int mdim, int * adim, int poffset, int doffset,
int * blocks) {
#ifdef _DEBUG
#define INDENT for (int i=0;i<dim;i++) cout << "\t";
#endif
int iIdx, offset = doffset;
if (dim < mdim - 2) {
int nloffset = 1;
for (int idx=0;idx<=dim;idx++)
nloffset *= adim[idx];
nloffset += doffset;
for (iIdx=0;iIdx<adim[dim];iIdx++) {
blocks[dim] = iIdx;
int loffset = 0;
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
loffset += adim[dim] * b;
}
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset + loffset<< endl;
#endif
bmtAssign_MultiDimension_Space_Rec(
ptrobj, ptrobj_d, flat_d, dim + 1,
mdim, adim, offset + loffset, nloffset, blocks);
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset + loffset;
/*reinterpret_cast<PtrObj>(offset+loffset);*/
offset++;
}
}
else {
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
offset += adim[dim] * b;
}
for (iIdx=0;iIdx<adim[dim];iIdx++) {
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset << endl;
#endif
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset;
/*reinterpret_cast<PtrObj>(offset);*/
int foffset = 0;
for (int i=0;i<mdim-1;i++) {
int ele = 1;
for (int j=i+1;j<mdim;j++)
ele *= adim[j];
if (i < mdim - 2)
foffset += blocks[i] * ele;
else
foffset += iIdx * ele;
}
ptrobj[offset] = flat_d + foffset;
/*reinterpret_cast<PtrObj>(foffset);*/
offset++;
}
}
return 0;
}
int bmtCreateDevice_MultiDimension_Space(
PRECISION ** m_h, PRECISION ** m_d, PRECISION * fm_d,
int dim, int * adim) {
int iIdx, jIdx, cnt = 1;
//Determine the number of blocks for storing pointer objects
for (iIdx=0;iIdx<dim-1;iIdx++)
cnt *= adim[iIdx];
for (iIdx=dim-3;iIdx>=0;iIdx--) {
int tcnt = 1;
for (jIdx=iIdx;jIdx>=0;jIdx--)
tcnt *= adim[jIdx];
cnt += tcnt;
}
#ifdef _DEBUG
cout << "***" << cnt << endl;
#endif
//Allocate blocks for storing pointer objects on both host and device
PtrObj * tm_h, * tm_d;
tm_h = new PtrObj[cnt];
CHECK_CUDAERR( __LINE__, cudaMalloc(&tm_d, cnt * sizeof(PtrObj)));
//Assign pointer values to blocks
int blocks[4];
bmtAssign_MultiDimension_Space_Rec(
tm_h, tm_d, fm_d, 0, dim,
adim, -1, 0, blocks);
  //Transfer the created multidimensional array to device
CHECK_CUDAERR( __LINE__, cudaMemcpy(tm_d, tm_h,
cnt * sizeof(PtrObj), cudaMemcpyHostToDevice));
*m_h = reinterpret_cast<PRECISION *>(tm_h);
*m_d = reinterpret_cast<PRECISION *>(tm_d);
#ifdef _DEBUG
cout << endl << "Origin:\t" << tm_d << endl;
for (iIdx=0;iIdx<cnt;iIdx++)
cout << iIdx << ":\t" << tm_h[iIdx] << endl;
#endif
return 0;
}
cudaError_t bmtInitDeviceMemory(
Matrix * pa, Matrix * pb, Matrix * pc,
Matrix * pp, Matrix * pwrk1, Matrix * pwrk2,
Matrix * pbnd, int peid, BMT_Config config, bool copy
/* PRECISION ** fa_d, PRECISION ** fb_d, PRECISION ** fc_d, PRECISION ** fp_d, PRECISION ** fwrk1_d,
PRECISION ** fwrk2_d, PRECISION ** fbnd_d, PRECISION ***** a_d, PRECISION ***** b_d, PRECISION ***** c_d,
PRECISION **** p_d, PRECISION **** wrk1_d, PRECISION **** wrk2_d, PRECISION **** bnd_d,
PRECISION ** gosa_d, PRECISION ***** a_h, PRECISION ***** b_h, PRECISION ***** c_h,
PRECISION **** p_h, PRECISION **** wrk1_h, PRECISION **** wrk2_h, PRECISION **** bnd_h,
PRECISION ** gosa_h*/) {
int devCnt = 0;
CHECK_CUDAERR( __LINE__, cudaGetDeviceCount(&devCnt));
CHECK_CUDAERR( __LINE__, cudaSetDevice(peid % devCnt));
// cout << "\t\t[" << peid << "]" << "cudasetdevice: " << peid % devCnt << endl;
gosa_h = new PRECISION[1];
CHECK_CUDAERR( __LINE__, cudaMalloc(&gosa_d, sizeof(PRECISION)));
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
CHECK_CUDAERR( __LINE__, cudaMalloc(&fa_d, 4 * memreq_3d));
CHECK_CUDAERR( __LINE__, cudaMalloc(&fb_d, 3 * memreq_3d));
CHECK_CUDAERR( __LINE__, cudaMalloc(&fc_d, 3 * memreq_3d));
CHECK_CUDAERR( __LINE__, cudaMalloc(&fp_d, memreq_3d));
CHECK_CUDAERR( __LINE__, cudaMalloc(&fwrk1_d, memreq_3d));
CHECK_CUDAERR( __LINE__, cudaMalloc(&fwrk2_d, memreq_3d));
CHECK_CUDAERR( __LINE__, cudaMalloc(&fbnd_d, memreq_3d));
CHECK_CUDAERR( __LINE__, cudaMemcpy(fa_d, pa->mpVal,
4 * memreq_3d, cudaMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, cudaMemcpy(fb_d, pb->mpVal,
3 * memreq_3d, cudaMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, cudaMemcpy(fc_d, pc->mpVal,
3 * memreq_3d, cudaMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, cudaMemcpy(fp_d, pp->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, cudaMemcpy(fwrk1_d, pwrk1->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, cudaMemcpy(fwrk2_d, pwrk2->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHECK_CUDAERR( __LINE__, cudaMemcpy(fbnd_d, pbnd->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
#ifndef _DEBUG
//Construct multi-dimensional space for matrices
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&a_h),
reinterpret_cast<PRECISION **>(&a_d),
fa_d, pa->mDim, pa->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&b_h),
reinterpret_cast<PRECISION **>(&b_d),
fb_d, pb->mDim, pb->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&c_h),
reinterpret_cast<PRECISION **>(&c_d),
fc_d, pc->mDim, pc->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&p_h),
reinterpret_cast<PRECISION **>(&p_d),
fp_d, pp->mDim, pp->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk1_h),
reinterpret_cast<PRECISION **>(&wrk1_d),
fwrk1_d, pwrk1->mDim, pwrk1->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk2_h),
reinterpret_cast<PRECISION **>(&wrk2_d),
fwrk2_d, pwrk2->mDim, pwrk2->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&bnd_h),
reinterpret_cast<PRECISION **>(&bnd_d),
fbnd_d, pbnd->mDim, pbnd->mpDim);
#else
Matrix * pfake;
pfake = new Matrix(4,2,3,4);
CHECK_CUDAERR( __LINE__, cudaMalloc(&ffake_d, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
CHECK_CUDAERR( __LINE__, cudaMemset(ffake_d, 0, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&fake_h),
reinterpret_cast<PRECISION **>(&fake_d),
ffake_d, pfake->mDim, pfake->mpDim);
DebugKernel <<<256, 512>>> (fake_d);
CHECK_CUDAERR( __LINE__, cudaDeviceSynchronize());
CHECK_CUDAERR( __LINE__, cudaMemcpy(pfake->mpVal, ffake_d, 4 * 2 * 3 * 4 *
sizeof(PRECISION), cudaMemcpyDeviceToHost));
for (int i=0;i<4;i++) {
cout << "[0, " << i << "]" << endl;
for (int j=0;j<2;j++) {
cout << "\t[1, " << j << "]" << endl;
for (int k=0;k<3;k++) {
cout << "\t\t[2, " << k << "]" << endl;
cout << "\t\t";
for (int l=0;l<4;l++) {
cout << pfake->mpVal[(i*24)+(j*12)+(k*4)+l] << "\t";
}
cout << endl;
}
cout << endl;
}
}
#endif
return cudaSuccess;
}
cudaError_t dealloc() {
/* delete[] gosa_h;
delete[] a_h;
delete[] b_h;*/
CHECK_CUDAERR( __LINE__, cudaFree(gosa_d));
CHECK_CUDAERR( __LINE__, cudaFree(fa_d));
CHECK_CUDAERR( __LINE__, cudaFree(fb_d));
CHECK_CUDAERR( __LINE__, cudaFree(fc_d));
CHECK_CUDAERR( __LINE__, cudaFree(fp_d));
CHECK_CUDAERR( __LINE__, cudaFree(fwrk1_d));
CHECK_CUDAERR( __LINE__, cudaFree(fwrk2_d));
CHECK_CUDAERR( __LINE__, cudaFree(fbnd_d));
CHECK_CUDAERR( __LINE__, cudaFree(ffake_d));
  return cudaSuccess;
}
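// bmtCudaJacobi: runs one Jacobi sweep plus the pressure update on the GPU,
// copies gosa and the pressure field back to the host, then releases the
// device buffers via dealloc().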
cudaError_t bmtCudaJacobi(PRECISION * gosa, Matrix * pp,
int imax, int jmax, int kmax,
BMT_Config config, int peid
/* PRECISION * fa_d, PRECISION * fb_d, PRECISION * fc_d, PRECISION * fp_d, PRECISION * fwrk1_d,
PRECISION* fwrk2_d, PRECISION* fbnd_d, PRECISION **** a_d, PRECISION **** b_d, PRECISION **** c_d,
PRECISION*** p_d, PRECISION *** wrk1_d, PRECISION *** wrk2_d, PRECISION *** bnd_d,
PRECISION * gosa_d, PRECISION **** a_h, PRECISION **** b_h, PRECISION **** c_h,
PRECISION *** p_h, PRECISION *** wrk1_h, PRECISION *** wrk2_h, PRECISION *** bnd_h,
PRECISION * gosa_h*/) {
int devCnt = 0;
CHECK_CUDAERR( __LINE__, cudaGetDeviceCount(&devCnt));
CHECK_CUDAERR( __LINE__, cudaSetDevice(peid % devCnt));
char hostname[128];
gethostname(hostname, 128);
cout << "\t\t[" << peid << "]: cudaSetDevice " <<
peid % devCnt << " on [" << hostname << "]"<< endl;
dim3 grid(16, 16, 1);
dim3 block(1, 1, 64);
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
// for (int idx=0;idx<nn;idx++) {
//Jacobi
struct timeval tb, te;
// cout << "Launching" << endl;
gettimeofday(&tb, NULL);
CHECK_CUDAERR( __LINE__, cudaMemset(gosa_d, 0, sizeof(PRECISION)));
bmtJacobiKernel <<<grid, block>>> (
a_d, b_d, c_d, p_d, wrk1_d, wrk2_d, bnd_d, gosa_d,
imax, jmax, kmax);
CHECK_CUDAERR( __LINE__, cudaDeviceSynchronize());
//Update Pressure Matrix
bmtUpdatePressureKernel <<<grid, block>>> (
p_d, wrk2_d,
imax, jmax, kmax);
CHECK_CUDAERR( __LINE__, cudaDeviceSynchronize());
gettimeofday(&te, NULL);
cout << "\t***Kernel: " << ((te.tv_usec - tb.tv_usec) * 1e-6 + ((double)te.tv_sec - (double)tb.tv_sec)) << endl;
CHECK_CUDAERR( __LINE__, cudaMemcpy(gosa_h, gosa_d,
sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHECK_CUDAERR( __LINE__, cudaMemcpy(pp->mpVal, fp_d,
memreq_3d, cudaMemcpyDeviceToHost));
*gosa = *gosa_h;
dealloc();
// cout << idx << ": " << *gosa_h << endl;
// }
// CHECK_CUDAERR( cudaMemcpy(gosa_h, gosa_d,
// sizeof(PRECISION), cudaMemcpyDeviceToHost));
return cudaSuccess;
}
|
d4adda14681efa76839697761061015f1c947541.hip | // !!! This is a file automatically generated by hipify!!!
extern "C"
{
#include "completion.h"
#include "base.h"
#include "ciss.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#include <time.h>
#include <stdint.h>
}
#include "als.cuh"
#include "loss.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusparse_v2.h>
#include <cusolver_common.h>
#include <cusolverDn.h>
#include <omp.h>
#define HANDLE_SOLVERERR( err ) (HandleSolverErr( err, __FILE__, __LINE__ ))
static void HandleSolverErr( cusolverStatus_t err, const char *file, int line )
{
if(err != CUSOLVER_STATUS_SUCCESS)
{
fprintf(stderr, "ERROR: in %s at line %d (error-code %d)\n",
file, line, err );
fflush(stdout);
exit(-1);
}
}
// gpu global function
/**
* @brief For computing the mttkrp in als
* @version Now only contains the atomic operation
*/
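// For every nonzero x(i,j,k) stored in a tile, the kernel accumulates the
// output-mode MTTKRP row
//   A(i,f) += x(i,j,k) * B(j,f) * C(k,f),  f = 0..DEFAULT_NFACTORS-1
// and the lower triangle of the per-row Gram block
//   H_i(f,g) += (B(j,f)*C(k,f)) * (B(j,g)*C(k,g)),  g <= f
// via atomicAdd, handling two nonzeros per loop iteration. The tile header
// (f_id, l_id, bitmap) records which output row each entry belongs to.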
__global__ void p_mttkrp_gpu(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
idx_t tilenum
)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
uint8_t flag;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[6];
double __align__(256) localmbuffer[2 * DEFAULT_NFACTORS];
//do the mttkrp
if(tileid < tilenum)
{
//get supportive information for tiles
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
//if(bitmap == 0) break;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
localtile += DEFAULT_T_TILE_WIDTH;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
//load in vectorize
for(int m = 0; m < ((idx_t)DEFAULT_T_TILE_LENGTH) / 2; m++ )
{
//unroll loop and load
//((double2*)localtbuffer)[0] = ((double2*)(entries+localtile))[0];
//((double2*)localtbuffer)[1] = ((double2*)(entries+localtile))[1];
//((double2*)localtbuffer)[2] = ((double2*)(entries+localtile))[2];
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
//do the mttkrp for the first
f_id = f_id + (!(bitmap & 1));
idx_t tmpi = d_traina->directory[f_id];
tmpi--;
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
bitmap = bitmap >> 1;
if((localtbuffer[0] == -1) && (localtbuffer[1] == -1)) break;
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
double b = d_factorb->values[((idx_t)localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS ) + j];
double c = d_factorc->values[((idx_t)localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
/*for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
idx_t b = d_factorb->values[(idx_t)(localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
idx_t c = d_factorc->values[(idx_t)(localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}*/
//do the mttkrp for the second
flag = !(bitmap & 1);
f_id = f_id + (!(bitmap & 1));
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
tmpi = d_traina->directory[f_id];
tmpi--;
bitmap = bitmap >> 1;
if((localtbuffer[0] == -1) && (localtbuffer[1] == -1)) break;
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
        // factor values are doubles; avoid truncating them to idx_t
        double b = d_factorb->values[((idx_t)localtbuffer[3]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
        double c = d_factorc->values[((idx_t)localtbuffer[4]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[DEFAULT_NFACTORS + j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[DEFAULT_NFACTORS + j] * localtbuffer[5]);
}
//compute the HTH for the first
//compute the HTH for the second
if(flag)
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult1 = localmbuffer[i] * localmbuffer[j];
double presult2 = localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[(f_id - flag) * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult1);
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult2);
}
}
}
else
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult = localmbuffer[i] * localmbuffer[j] + localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult);
}
}
}
localtile += 2*DEFAULT_T_TILE_WIDTH;
}
}
}
/**
* @brief For computing the mttkrp in als, only one element on one thread
 * @version Now reduce atomic add with segment scan
*/
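// One warp owns one tile and each lane owns one nonzero; the tile bitmap
// marks where a new output row (fiber) starts within the tile. Per-lane
// Hadamard products are folded across the fiber with __shfl_down_sync, so a
// single lane per fiber performs the atomicAdd into d_hbuffer and d_factora
// instead of one atomic per nonzero.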
__global__ void p_mttkrp_gpu_as(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
//double * d_hthbuffer,
idx_t tilenum)
{
//get block, warp and thread index
__shared__ uint32_t warpmask[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)];
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[3];
double __align__(256) localmbuffer[DEFAULT_NFACTORS];
double mytmp = 0, myntmp = 0;
//initialize the warp mask
if(laneid == 0) warpmask[warpid] = 0xffffffff;
if((tileid * DEFAULT_T_TILE_LENGTH + laneid) == d_traina->nnz)
{
//redefine the mask
warpmask[warpid] = __brev((warpmask[warpid]<<(32-laneid)));
}
__syncwarp();
uint32_t mymask = warpmask[warpid];
#ifdef ALSAS_DEBUG
//printf("now the mymask and mynnz id in thread %ld are %x and %ld\n", tid, mymask, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
if((tileid < tilenum) && ((tileid * DEFAULT_T_TILE_LENGTH + laneid)<d_traina->nnz))
{
//initialize the information for tile and local entry
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
if(bitmap != 0)
{
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
idx_t itercounter = __popcll(bitmap) - (bitmap & 1);
#ifdef ALSAS_DEBUG
//if(laneid == 0)
//printf("now the itercounter is %ld\n", itercounter);
#endif
idx_t myfid = f_id + laneid - __popcll((bitmap << (63-laneid))) + 1;
#ifdef ALSAS_DEBUG
//printf("now the myfid in thread %ld is %ld\n", tid, myfid);
#endif
idx_t mybit = ((bitmap >> (laneid)) & 1);
idx_t mylbit = mybit;
if(laneid == 0)
{
mylbit = 0;
mybit = 1;
}
//inter thread computation
localtbuffer[0] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH];
localtbuffer[1] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 1];
localtbuffer[2] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 2];
idx_t tmpi = d_traina->directory[myfid] - 1;
idx_t b = (idx_t)localtbuffer[0] - 1;
idx_t c = (idx_t)localtbuffer[1] - 1;
//for the hadamard
#ifdef ALSAS_DEBUG
//printf("now the myposition for hthbuffer in thread %ld is %ld\n", tid, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
localmbuffer[m] = d_factorb->values[b * DEFAULT_NFACTORS + m] * d_factorc->values[c * DEFAULT_NFACTORS + m];
//d_hthbuffer[(tileid * DEFAULT_T_TILE_LENGTH + laneid)*DEFAULT_NFACTORS + m] = localmbuffer[m];
}
__syncwarp(mymask);
//reduction in hth
//mytmp: final partial result; myntmp: messages
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
for(int j = 0; j <=m ; j++)
{
mytmp = localmbuffer[m] * localmbuffer[j];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_hbuffer[myfid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + m * DEFAULT_NFACTORS + j]), mytmp);
}
__syncwarp(mymask);
}
}
__syncwarp(mymask);
//reduction in mttkrp
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
mytmp = localmbuffer[m] * localtbuffer[2];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + m]), mytmp);
}
__syncwarp(mymask);
}
}
}
__syncthreads();
}
/**
 * @brief Update the H matrices and prepare for the inversion and the linear solve
* @version Warp shuffle
**/
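// Each warp rebuilds the Gram matrix H^T H of one output row from the
// Hadamard rows cached in d_hthbuffer (dcounter gives that row's nonzero
// range), adds the regularization term to the diagonal, and records the
// (Gram block, factor row) pointer pair consumed by the batched Cholesky
// solver.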
__global__ void p_hth_update_as(cissbasic_t * d_traina,
double * d_hthbuffer,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
__shared__ double blkmbuffer[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) * (idx_t)DEFAULT_NFACTORS];
//get block, warp and thread index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double __align__(256) localhthbuffer[DEFAULT_NFACTORS]={0};
if(tileid < dlength && laneid < DEFAULT_NFACTORS)
{
idx_t dcounter = d_traina->dcounter[tileid+1] - d_traina->dcounter[tileid];
#ifdef ALSAS_DEBUG
if(laneid == 0) printf("my dcounter is %ld\n and my tileid is %ld\n", dcounter, tileid);
#endif
idx_t basicposition = d_traina->dcounter[tileid];
idx_t basicsposition = warpid * DEFAULT_NFACTORS;
for(idx_t i = 0; i < dcounter; i++)
{
double localvalue = d_hthbuffer[(basicposition + i) * DEFAULT_NFACTORS + laneid];
blkmbuffer[basicsposition + laneid] = localvalue;
__syncwarp();
for(idx_t j = 0; j < DEFAULT_NFACTORS; j++)
{
localhthbuffer[j] += localvalue * blkmbuffer[basicsposition + j];
}
}
__syncwarp();
localhthbuffer[laneid] += regularization_index;
for(idx_t i = 0; i < DEFAULT_NFACTORS; i++)
{
d_hbuffer[tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + laneid * DEFAULT_NFACTORS + i] = localhthbuffer[i];
}
__syncwarp();
//prepare for ptrs
if(laneid == 0)
{
idx_t fid = d_traina->directory[tileid] - 1;
d_factptr[tileid] = d_value_a + fid * DEFAULT_NFACTORS;
d_hbufptr[tileid] = d_hbuffer + tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
}
}
__syncwarp();
}
/**
* @brief Compute the inverse and finish the final update
* @version Now only with coarse grain
*/
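// Coarse-grain fallback: one thread solves the normal equations of one
// factor row. It Cholesky-factorizes the regularized DEFAULT_NFACTORS x
// DEFAULT_NFACTORS Gram block in registers, forms the explicit inverse by
// forward/backward substitution against the identity, and multiplies the
// MTTKRP row by that inverse to overwrite the factor row in place.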
__global__ void p_update_als_gpu(cissbasic_t * d_traina,
ordi_matrix * d_factora,
double * d_hbuffer,
idx_t dlength,
double regularization_index
)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
double lv[DEFAULT_NFACTORS * DEFAULT_NFACTORS]={0};
if(tileid < dlength)
{
//compute the inverse
idx_t tmpi = d_traina->directory[tileid];
tmpi--;
double *av = d_hbuffer + basicposition;
idx_t i = 0;
idx_t j = 0;
idx_t k = 0;
for (i = 0; i < DEFAULT_NFACTORS; ++i)
{
for (j = 0; j <= i; ++j)
{
double inner = 0;
for (k = 0; k < j; ++k)
{
inner += lv[k+(i*DEFAULT_NFACTORS)] * lv[k+(j*DEFAULT_NFACTORS)];
}
if(i == j)
{
lv[j+(i*DEFAULT_NFACTORS)] = sqrt(av[i+(i*DEFAULT_NFACTORS)] - inner + regularization_index);
}
else
{
lv[j+(i*DEFAULT_NFACTORS)] = 1.0 / lv[j+(j*DEFAULT_NFACTORS)] * (av[j+(i*DEFAULT_NFACTORS)] - inner);
}
}
}
for(i = 0; i< DEFAULT_NFACTORS * DEFAULT_NFACTORS; i++)
{
av[i] = 0;
}
idx_t n = 0;
for(n=0; n<DEFAULT_NFACTORS; n++) //get identity matrix
{
av[n+(n*DEFAULT_NFACTORS)] = 1.0;
}
//forward solve
i = 1; //define counters outside the loop
j = 0;
idx_t f = 0;
for(j=0; j < DEFAULT_NFACTORS; ++j)
{
av[j] /= lv[0];
}
for(i=1; i < DEFAULT_NFACTORS; ++i)
{
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} L(i,j)X(i,j) */
for(j=0; j < i; ++j)
{
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+(j*DEFAULT_NFACTORS)];
}
}
for(f=0; f <DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
for(i=0; i < DEFAULT_NFACTORS; ++i)
{
for(j=i+1; j < DEFAULT_NFACTORS; ++j)
{
lv[j+(i*DEFAULT_NFACTORS)] = lv[i+(j*DEFAULT_NFACTORS)];
lv[i+(j*DEFAULT_NFACTORS)] = 0.0;
}
}
//backsolve
f = 0; //set counters
j = 0;
idx_t row = 2;
/* last row of X is easy */
for(f=0; f < DEFAULT_NFACTORS; ++f) {
i = DEFAULT_NFACTORS - 1;
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
/* now do backward substitution */
for(row=2; row <= DEFAULT_NFACTORS; ++row)
{
i = DEFAULT_NFACTORS - row;
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} R(i,j)X(i,j) */
for( j=i+1; j < DEFAULT_NFACTORS; ++j)
{
for( f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+( j * DEFAULT_NFACTORS )];
}
}
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
//now do the final update
double * mvals = d_factora->values + tmpi * DEFAULT_NFACTORS;
for(i = 0; i < DEFAULT_NFACTORS; i++)
{
lv[i] = 0;
for(j = 0; j < DEFAULT_NFACTORS; j++)
{
lv[i] += mvals[j] * av[i * DEFAULT_NFACTORS + j];
}
}
//the final transmission
for(i = 0; i < DEFAULT_NFACTORS/2; i++)
{
((double2*)mvals)[i] = ((double2*)lv)[i];
}
}
}
/**
* @brief Update the matrice
* @version Now only with coarse grain
*/
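// Adds the regularization term to the diagonal of each row's Gram block and
// fills the pointer arrays (Gram block, factor row) consumed by the batched
// hipsolverDnDpotrfBatched / hipsolverDnDpotrsBatched calls in tc_als.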
__global__ void p_update_matrice(cissbasic_t * d_traina,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
if(tileid < dlength)
{
idx_t tmpi = d_traina->directory[tileid] - 1;
for(idx_t f = 0; f < DEFAULT_NFACTORS; f++)
{
d_hbuffer[basicposition + f*DEFAULT_NFACTORS + f] += regularization_index;
}
d_hbufptr[tileid] = d_hbuffer + basicposition;
d_factptr[tileid] = d_value_a + tmpi * DEFAULT_NFACTORS;
}
}
void p_cholecheck(double * d_factora,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength)
{
}
extern "C"{
/**
* @brief The main function for tensor completion in als
* @param train The tensor for generating factor matrices
* @param validation The tensor for validation(RMSE)
* @param test The tensor for testing the quality
* @param regularization_index Lambda
*/
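// Overall flow: each mode's tensor is split into per-GPU TB-COO partitions
// (ciss_alloc) and copied to the devices together with the factor matrices.
// Every epoch then visits the three modes in a randomly rotated order and,
// per mode: zeroes the factor, runs p_mttkrp_gpu_as (MTTKRP plus Gram
// blocks), applies p_update_matrice to regularize and batch the pointers,
// solves all rows with potrfBatched/potrsBatched, and exchanges the updated
// row ranges between the GPUs. Progress is measured with tc_loss_sq,
// tc_frob_sq and tc_converge.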
void tc_als(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
idx_t algorithm_index,
double regularization_index,
double * best_rmse,
double * tolerance,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
idx_t const nmodes = traina->nmodes;
#ifdef CISS_DEBUG
printf("enter the als\n");
#endif
//initialize the devices
int deviceCount;
hipGetDeviceCount(&deviceCount);
int n;
//print the GPU status
for(n = 0; n < deviceCount; n++)
{
hipDeviceProp_t dprop;
hipGetDeviceProperties(&dprop, n);
printf(" %d: %s\n", n, dprop.name);
}
omp_set_num_threads(deviceCount);
//prepare the tensor in TB-COO
ciss_t * h_cissta = ciss_alloc(traina, 1, deviceCount);
ciss_t * h_cisstb = ciss_alloc(trainb, 2, deviceCount);
ciss_t * h_cisstc = ciss_alloc(trainc, 3, deviceCount);
#ifdef MCISS_DEBUG
fprintf(stdout, "the new tensors for mode 0\n");
cissbasic_display(h_cissta->cissunits[0]);
cissbasic_display(h_cissta->cissunits[1]);
#endif
struct timeval start;
struct timeval end;
idx_t diff;
cissbasic_t ** d_traina = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainb = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainc = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
idx_t ** d_directory_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
double ** d_entries_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_c = (double**)malloc(deviceCount * sizeof(double*));
double ** d_hbuffer = (double**)malloc(deviceCount * sizeof(double*));
//double ** d_hthbuffer = (double**)malloc(deviceCount * sizeof(double*));
int ** d_infoarray = (int**)malloc(deviceCount * sizeof(int*));
double *** d_hbufptr = (double***)malloc(deviceCount * sizeof(double**));
double *** d_factptr = (double***)malloc(deviceCount * sizeof(double**));
ordi_matrix ** d_factora = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorb = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorc = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
double ** d_value_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_c = (double**)malloc(deviceCount * sizeof(double*));
idx_t * maxdlength = (idx_t*)malloc(deviceCount * sizeof(idx_t));
idx_t * maxnnz = (idx_t*)malloc(deviceCount * sizeof(idx_t));
hipsolverDnHandle_t handle0, handle1;
hipSetDevice(0);
HANDLE_SOLVERERR(hipsolverDnCreate((&handle0)));
hipSetDevice(1);
HANDLE_SOLVERERR(hipsolverDnCreate((&handle1)));
#pragma omp parallel
{
//prepare the threads
unsigned int cpu_thread_id = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
//set gpus
int gpu_id = -1;
hipSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
hipGetDevice(&gpu_id);
idx_t * d_itemp1, *d_itemp2, *d_itemp3;
double * d_ftemp;
//initialize the cusolver
//HANDLE_SOLVERERR(hipsolverDnCreate((&(handle[gpu_id]))));
//malloc and copy the tensors + matrices to gpu
cissbasic_t * h_traina = h_cissta->cissunits[gpu_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[gpu_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[gpu_id];
//copy tensor for mode-1
HANDLE_ERROR(hipMalloc((void**)&(d_traina[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_directory_a[gpu_id]), h_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_a[gpu_id]), (h_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_entries_a[gpu_id]), h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_dims_a[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_counter_a[gpu_id], h_traina->dcounter, (h_traina->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_directory_a[gpu_id], h_traina->directory, h_traina->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_a[gpu_id], h_traina->entries, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_a[gpu_id], h_traina->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
d_itemp1 = h_traina->directory;
d_itemp2 = h_traina->dims;
d_itemp3 = h_traina->dcounter;
d_ftemp = h_traina->entries;
h_traina->directory = d_directory_a[gpu_id];
h_traina->dims = d_dims_a[gpu_id];
h_traina->entries = d_entries_a[gpu_id];
h_traina->dcounter = d_counter_a[gpu_id];
HANDLE_ERROR(hipMemcpy(d_traina[gpu_id], h_traina, sizeof(cissbasic_t), hipMemcpyHostToDevice));
h_traina->directory = d_itemp1;
h_traina->dims = d_itemp2;
h_traina->entries = d_ftemp;
h_traina->dcounter = d_itemp3;
//copy tensor for mode-2
HANDLE_ERROR(hipMalloc((void**)&(d_trainb[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_directory_b[gpu_id]), h_trainb->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_b[gpu_id]), (h_trainb->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_entries_b[gpu_id]), h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_dims_b[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_directory_b[gpu_id], h_trainb->directory, h_trainb->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_counter_b[gpu_id], h_trainb->dcounter, (h_trainb->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_b[gpu_id], h_trainb->entries, h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_b[gpu_id], h_trainb->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
d_itemp1 = h_trainb->directory;
d_itemp2 = h_trainb->dims;
d_itemp3 = h_trainb->dcounter;
d_ftemp = h_trainb->entries;
h_trainb->directory = d_directory_b[gpu_id];
h_trainb->dims = d_dims_b[gpu_id];
h_trainb->entries = d_entries_b[gpu_id];
h_trainb->dcounter = d_counter_b[gpu_id];
HANDLE_ERROR(hipMemcpy(d_trainb[gpu_id], h_trainb, sizeof(cissbasic_t), hipMemcpyHostToDevice));
h_trainb->directory = d_itemp1;
h_trainb->dims = d_itemp2;
h_trainb->entries = d_ftemp;
h_trainb->dcounter = d_itemp3;
//copy tensor for mode-3
HANDLE_ERROR(hipMalloc((void**)&(d_trainc[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_directory_c[gpu_id]), h_trainc->dlength * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_counter_c[gpu_id]), (h_trainc->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(hipMalloc((void**)&(d_entries_c[gpu_id]), h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(hipMalloc((void**)&(d_dims_c[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(hipMemcpy(d_directory_c[gpu_id], h_trainc->directory, h_trainc->dlength*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_counter_c[gpu_id], h_trainc->dcounter, (h_trainc->dlength + 1)*sizeof(idx_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_entries_c[gpu_id], h_trainc->entries, h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_dims_c[gpu_id], h_trainc->dims, nmodes*sizeof(idx_t), hipMemcpyHostToDevice));
d_itemp1 = h_trainc->directory;
d_itemp2 = h_trainc->dims;
d_ftemp = h_trainc->entries;
d_itemp3 = h_trainc->dcounter;
h_trainc->directory = d_directory_c[gpu_id];
h_trainc->dims = d_dims_c[gpu_id];
h_trainc->entries = d_entries_c[gpu_id];
h_trainc->dcounter = d_counter_c[gpu_id];
HANDLE_ERROR(hipMemcpy(d_trainc[gpu_id], h_trainc, sizeof(cissbasic_t), hipMemcpyHostToDevice));
h_trainc->directory = d_itemp1;
h_trainc->dims = d_itemp2;
h_trainc->entries = d_ftemp;
h_trainc->dcounter = d_itemp3;
//buffer for HTH
maxdlength[gpu_id] = SS_MAX(SS_MAX(h_traina->dlength, h_trainb->dlength),h_trainc->dlength);
maxnnz[gpu_id] = SS_MAX(SS_MAX(h_traina->nnz, h_trainb->nnz),h_trainc->nnz);
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d the cpu maxnnz is %ld\n", cpu_thread_id,maxnnz[gpu_id]);
#endif
HANDLE_ERROR(hipMalloc((void**)&(d_hbuffer[gpu_id]), DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[gpu_id] * sizeof(double)));
//HANDLE_ERROR(hipMalloc((void**)&(d_hthbuffer[gpu_id]), DEFAULT_NFACTORS * maxnnz[gpu_id] * sizeof(double)));
//HANDLE_ERROR(hipMalloc((void**)&d_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//buffer for inversion
HANDLE_ERROR(hipMalloc((void**)&(d_hbufptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(hipMalloc((void**)&(d_factptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(hipMalloc((void**)&(d_infoarray[gpu_id]), maxdlength[gpu_id] * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&(d_factora[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_a[gpu_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_a[gpu_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[0]->values;
mats[0]->values = d_value_a[gpu_id];
HANDLE_ERROR(hipMemcpy(d_factora[gpu_id], mats[0], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[0]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(hipMalloc((void**)&(d_factorb[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_b[gpu_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_b[gpu_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b[gpu_id];
HANDLE_ERROR(hipMemcpy(d_factorb[gpu_id], mats[1], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[1]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(hipMalloc((void**)&(d_factorc[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(hipMalloc((void**)&(d_value_c[gpu_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(hipMemcpy(d_value_c[gpu_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[2]->values;
mats[2]->values = d_value_c[gpu_id];
HANDLE_ERROR(hipMemcpy(d_factorc[gpu_id], mats[2], sizeof(ordi_matrix), hipMemcpyHostToDevice));
mats[2]->values = d_ftemp;
}
}
#ifdef CUDA_LOSS //to be done
sptensor_gpu_t * d_test, * d_validate;
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
#endif
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
//step into the kernel
idx_t mode_i, mode_n, m;
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
for(idx_t e=1; e < DEFAULT_MAX_ITERATE+1; ++e) {
gettimeofday(&start,NULL);
//can set random variables
srand(time(0));
mode_i = rand()%3;
#ifdef ALSAS_DEBUG
mode_i = 0;
fprintf(stdout, "now the mode_i is %d\n", mode_i);
#endif
for(m=0; m < 3; m++) {
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
hipSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
hipsolverDnHandle_t handle;
if(!cpu_thread_id) handle = handle1;
else handle = handle0;
cissbasic_t * h_traina = h_cissta->cissunits[cpu_thread_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[cpu_thread_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[cpu_thread_id];
idx_t mymode_n = (mode_i + m)%3;
idx_t blocknum_u, blocknum_h, nnz, tilenum, blocknum_m;
HANDLE_ERROR(hipMemset(d_hbuffer[cpu_thread_id], 0, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[cpu_thread_id] * sizeof(double)));
//HANDLE_ERROR(hipMemcpy(d_invbuffer, h_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)),hipMemcpyHostToDevice);
switch (mymode_n)
{
case 0:
{
nnz = h_traina->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d, nnz is %d, blocknum_m is %d, tilenum is %d\n", cpu_thread_id, nnz, blocknum_m, tilenum);
#endif
HANDLE_ERROR(hipMemset(d_value_a[cpu_thread_id], 0, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_traina->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_traina->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
hipLaunchKernelGGL(( p_mttkrp_gpu_as), dim3(blocknum_m),dim3(DEFAULT_BLOCKSIZE),0, 0, d_traina[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(hipDeviceSynchronize());
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d ends mttkrp\n", cpu_thread_id);
fprintf(stdout, "now in thread %d the blocknum for hth update is %ld and the dlength is %ld\n", cpu_thread_id, blocknum_h, h_traina->dlength);
#endif
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_traina[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
hipLaunchKernelGGL(( p_update_matrice), dim3(blocknum_u), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_traina[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
#ifdef ALS_DEBUG
p_cholecheck(d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength);
#endif
HANDLE_SOLVERERR(hipsolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_SOLVERERR(hipsolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_ERROR(hipDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(hipMemcpy(mats[0]->values + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, d_value_a[cpu_thread_id] + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, (h_cissta->d_ref[cpu_thread_id + 1] - h_cissta->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(d_value_a[cpu_thread_id] + (h_cissta->d_ref[(cpu_thread_id + 1)% deviceCount] - 1) * DEFAULT_NFACTORS, mats[0]->values + (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount] -1 ) * DEFAULT_NFACTORS, (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
break;
}
case 1:
{
nnz = h_trainb->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(hipMemset(d_value_b[cpu_thread_id], 0, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainb->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainb->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
hipLaunchKernelGGL(( p_mttkrp_gpu_as), dim3(blocknum_m),dim3(DEFAULT_BLOCKSIZE),0, 0, d_trainb[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(hipDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainb[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( p_update_matrice), dim3(blocknum_u), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_trainb[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_SOLVERERR(hipsolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_SOLVERERR(hipsolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_ERROR(hipDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(hipMemcpy(mats[1]->values + (h_cisstb->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, d_value_b[cpu_thread_id] + (h_cisstb->d_ref[cpu_thread_id] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[cpu_thread_id + 1] - h_cisstb->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(d_value_b[cpu_thread_id] + (h_cisstb->d_ref[(cpu_thread_id + 1)% deviceCount] - 1)* DEFAULT_NFACTORS, mats[1]->values + (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
break;
}
default:
{
nnz = h_trainc->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(hipMemset(d_value_c[cpu_thread_id], 0, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainc->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainc->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
hipLaunchKernelGGL(( p_mttkrp_gpu_as), dim3(blocknum_m),dim3(DEFAULT_BLOCKSIZE),0, 0, d_trainc[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(hipDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainc[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
hipLaunchKernelGGL(( p_update_matrice), dim3(blocknum_u), dim3(DEFAULT_BLOCKSIZE), 0, 0, d_trainc[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_SOLVERERR(hipsolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_SOLVERERR(hipsolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_ERROR(hipDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(hipMemcpy(mats[2]->values + (h_cisstc->d_ref[cpu_thread_id] -1 ) * DEFAULT_NFACTORS, d_value_c[cpu_thread_id] + (h_cisstc->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, (h_cisstc->d_ref[cpu_thread_id + 1] - h_cisstc->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(d_value_c[cpu_thread_id] + (h_cisstc->d_ref[(cpu_thread_id + 1)% deviceCount] -1) * DEFAULT_NFACTORS, mats[2]->values + (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount] -1)* DEFAULT_NFACTORS, (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
break;
}
//p_update_als(train, mats, m, DEFAULT_NFACTORS, regularization_index);
}
}
}
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#ifdef DEBUG
matrix_display(mats[0]);
matrix_display(mats[1]);
matrix_display(mats[2]);
#endif
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
/* compute new obj value, print stats, and exit if converged */
loss = tc_loss_sq(traina, mats, algorithm_index);
frobsq = tc_frob_sq(nmodes, regularization_index, mats);
if(tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
} /* foreach iteration */
hipSetDevice(0);
HANDLE_SOLVERERR(hipsolverDnDestroy(handle0));
hipSetDevice(1);
HANDLE_SOLVERERR(hipsolverDnDestroy(handle1));
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
hipSetDevice(cpu_thread_id % deviceCount);
//end the cusolver
//HANDLE_SOLVERERR(hipsolverDnDestroy(handle));
//free the cudabuffer
hipFree(d_counter_a[cpu_thread_id]);
hipFree(d_directory_a[cpu_thread_id]);
hipFree(d_dims_a[cpu_thread_id]);
hipFree(d_entries_a[cpu_thread_id]);
hipFree(d_counter_b[cpu_thread_id]);
hipFree(d_directory_b[cpu_thread_id]);
hipFree(d_dims_b[cpu_thread_id]);
hipFree(d_entries_b[cpu_thread_id]);
hipFree(d_counter_c[cpu_thread_id]);
hipFree(d_directory_c[cpu_thread_id]);
hipFree(d_dims_c[cpu_thread_id]);
hipFree(d_entries_c[cpu_thread_id]);
hipFree(d_hbuffer[cpu_thread_id]);
hipFree(d_hbufptr[cpu_thread_id]);
//hipFree(d_hthbuffer[cpu_thread_id]);
hipFree(d_factptr[cpu_thread_id]);
hipFree(d_infoarray[cpu_thread_id]);
hipFree(d_value_a[cpu_thread_id]);
hipFree(d_value_b[cpu_thread_id]);
hipFree(d_value_c[cpu_thread_id]);
hipFree(d_traina[cpu_thread_id]);
hipFree(d_trainb[cpu_thread_id]);
hipFree(d_trainc[cpu_thread_id]);
hipFree(d_factora[cpu_thread_id]);
hipFree(d_factorb[cpu_thread_id]);
hipFree(d_factorc[cpu_thread_id]);
//hipFree(d_hthbuffer[cpu_thread_id]);
hipDeviceReset();
}
ciss_free(h_cissta, deviceCount);
ciss_free(h_cisstb, deviceCount);
ciss_free(h_cisstc, deviceCount);
free(d_traina);
free(d_trainb);
free(d_trainc);
free(d_directory_a);
free(d_directory_b);
free(d_directory_c);
free(d_counter_a);
free(d_counter_b);
free(d_counter_c);
free(d_dims_a);
free(d_dims_b);
free(d_dims_c);
free(d_entries_a);
free(d_entries_b);
free(d_entries_c);
free(d_hbuffer);
//free(d_hthbuffer);
free(d_hbufptr);
free(d_infoarray);
free(d_factptr);
//free(handle);
free(d_factora);
free(d_factorb);
free(d_factorc);
free(d_value_a);
free(d_value_b);
free(d_value_c);
free(maxdlength);
free(maxnnz);
}
}
| d4adda14681efa76839697761061015f1c947541.cu | extern "C"
{
#include "completion.h"
#include "base.h"
#include "ciss.h"
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#include <time.h>
#include <stdint.h>
}
#include "als.cuh"
#include "loss.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusparse_v2.h>
#include <cusolver_common.h>
#include <cusolverDn.h>
#include <omp.h>
#define HANDLE_SOLVERERR( err ) (HandleSolverErr( err, __FILE__, __LINE__ ))
static void HandleSolverErr( cusolverStatus_t err, const char *file, int line )
{
if(err != CUSOLVER_STATUS_SUCCESS)
{
fprintf(stderr, "ERROR: in %s at line %d (error-code %d)\n",
file, line, err );
fflush(stdout);
exit(-1);
}
}
// gpu global function
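// Layout note (inferred from the decoding in the kernels below, not from any
// upstream documentation): the tensor is stored in tiles of
// DEFAULT_T_TILE_LENGTH entries. Each tile starts with one header row
// {-first_row_id, -last_row_id, bitmap} followed by DEFAULT_T_TILE_WIDTH-wide
// entry rows {column_b, column_c, value}; the bitmap marks the positions inside
// the tile where the output row (f_id) changes.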
/**
* @brief For computing the MTTKRP in ALS
* @version This variant relies solely on atomic operations for accumulation
*/
__global__ void p_mttkrp_gpu(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
idx_t tilenum
)
{
//get thread and block index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
uint8_t flag;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[6];
double __align__(256) localmbuffer[2 * DEFAULT_NFACTORS];
//do the mttkrp
if(tileid < tilenum)
{
//get supportive information for tiles
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
//if(bitmap == 0) break;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
localtile += DEFAULT_T_TILE_WIDTH;
#ifdef DEBUG
if(tileid == 0)
{
printf("f_id %ld, l_id %ld, bitmap %ld\n", f_id, l_id, bitmap);
}
#endif
//load in vectorize
for(int m = 0; m < ((idx_t)DEFAULT_T_TILE_LENGTH) / 2; m++ )
{
//unroll loop and load
//((double2*)localtbuffer)[0] = ((double2*)(entries+localtile))[0];
//((double2*)localtbuffer)[1] = ((double2*)(entries+localtile))[1];
//((double2*)localtbuffer)[2] = ((double2*)(entries+localtile))[2];
localtbuffer[0] = entries[localtile];
localtbuffer[1] = entries[localtile + 1];
localtbuffer[2] = entries[localtile + 2];
localtbuffer[3] = entries[localtile + 3];
localtbuffer[4] = entries[localtile + 4];
localtbuffer[5] = entries[localtile + 5];
//do the mttkrp for the first
f_id = f_id + (!(bitmap & 1));
idx_t tmpi = d_traina->directory[f_id];
tmpi--;
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
bitmap = bitmap >> 1;
if((localtbuffer[0] == -1) && (localtbuffer[1] == -1)) break;
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
double b = d_factorb->values[((idx_t)localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS ) + j];
double c = d_factorc->values[((idx_t)localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}
//if(localtbuffer[0] == -1 && localtbuffer[1] == -1) break;
/*for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
idx_t b = d_factorb->values[(idx_t)(localtbuffer[0]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
idx_t c = d_factorc->values[(idx_t)(localtbuffer[1]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[j] * localtbuffer[2]);
}*/
//do the mttkrp for the second
flag = !(bitmap & 1);
f_id = f_id + (!(bitmap & 1));
#ifdef DEBUG
printf("the fid is %d\n", f_id);
#endif
tmpi = d_traina->directory[f_id];
tmpi--;
bitmap = bitmap >> 1;
if((localtbuffer[0] == -1) && (localtbuffer[1] == -1)) break;
for(int j = 0; j < DEFAULT_NFACTORS; j++)
{
double b = d_factorb->values[((idx_t)localtbuffer[3]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
double c = d_factorc->values[((idx_t)localtbuffer[4]*DEFAULT_NFACTORS - DEFAULT_NFACTORS) + j];
localmbuffer[DEFAULT_NFACTORS + j] = b * c;
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + j]), localmbuffer[DEFAULT_NFACTORS + j] * localtbuffer[5]);
}
//compute the HTH for the first
//compute the HTH for the second
if(flag)
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult1 = localmbuffer[i] * localmbuffer[j];
double presult2 = localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[(f_id - flag) * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult1);
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult2);
}
}
}
else
{
for(int i = 0; i < DEFAULT_NFACTORS; i++)
{
for(int j = 0; j <=i ; j++)
{
double presult = localmbuffer[i] * localmbuffer[j] + localmbuffer[DEFAULT_NFACTORS + i] * localmbuffer[DEFAULT_NFACTORS + j];
atomicAdd(&(d_hbuffer[f_id * DEFAULT_NFACTORS * DEFAULT_NFACTORS + i * DEFAULT_NFACTORS + j]), presult);
}
}
}
localtile += 2*DEFAULT_T_TILE_WIDTH;
}
}
}
/**
* @brief For computing the MTTKRP in ALS, one nonzero element per thread
* @version Reduces atomic adds with a warp-level segmented scan
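* Note (reading of the code below): each warp owns one tile and each lane one
* nonzero; the per-lane ownership bits (mybit/mylbit) drive a __shfl_down_sync
* segmented reduction, and only lanes whose ownership bit is clear commit the
* reduced value with a single atomicAdd into the factor and the H buffer.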
*/
__global__ void p_mttkrp_gpu_as(cissbasic_t* d_traina,
ordi_matrix * d_factora,
ordi_matrix * d_factorb,
ordi_matrix * d_factorc,
double * d_hbuffer,
//double * d_hthbuffer,
idx_t tilenum)
{
//get block, warp and thread index
__shared__ uint32_t warpmask[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)];
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double * entries = d_traina -> entries;
idx_t localtile = tileid * ((DEFAULT_T_TILE_LENGTH + 1) * DEFAULT_T_TILE_WIDTH);
double __align__(256) localtbuffer[3];
double __align__(256) localmbuffer[DEFAULT_NFACTORS];
double mytmp = 0, myntmp = 0;
//initialize the warp mask
if(laneid == 0) warpmask[warpid] = 0xffffffff;
if((tileid * DEFAULT_T_TILE_LENGTH + laneid) == d_traina->nnz)
{
//redefine the mask
warpmask[warpid] = __brev((warpmask[warpid]<<(32-laneid)));
}
__syncwarp();
uint32_t mymask = warpmask[warpid];
#ifdef ALSAS_DEBUG
//printf("now the mymask and mynnz id in thread %ld are %x and %ld\n", tid, mymask, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
if((tileid < tilenum) && ((tileid * DEFAULT_T_TILE_LENGTH + laneid)<d_traina->nnz))
{
//initialize the information for tile and local entry
idx_t f_id = (idx_t)(entries[localtile] * (-1)) ;
idx_t l_id = (idx_t)(entries[localtile+1] * (-1)) ;
idx_t bitmap = (idx_t)(entries[localtile+2]);
if(bitmap != 0)
{
bitmap = __brevll(bitmap);
while((bitmap & 1) == 0) {bitmap = bitmap >> 1;}
bitmap = bitmap >> 1;
idx_t itercounter = __popcll(bitmap) - (bitmap & 1);
#ifdef ALSAS_DEBUG
//if(laneid == 0)
//printf("now the itercounter is %ld\n", itercounter);
#endif
idx_t myfid = f_id + laneid - __popcll((bitmap << (63-laneid))) + 1;
#ifdef ALSAS_DEBUG
//printf("now the myfid in thread %ld is %ld\n", tid, myfid);
#endif
idx_t mybit = ((bitmap >> (laneid)) & 1);
idx_t mylbit = mybit;
if(laneid == 0)
{
mylbit = 0;
mybit = 1;
}
//inter thread computation
localtbuffer[0] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH];
localtbuffer[1] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 1];
localtbuffer[2] = entries[localtile + (laneid + 1) * DEFAULT_T_TILE_WIDTH + 2];
idx_t tmpi = d_traina->directory[myfid] - 1;
idx_t b = (idx_t)localtbuffer[0] - 1;
idx_t c = (idx_t)localtbuffer[1] - 1;
//for the hadamard
#ifdef ALSAS_DEBUG
//printf("now the myposition for hthbuffer in thread %ld is %ld\n", tid, (tileid * DEFAULT_T_TILE_LENGTH + laneid));
#endif
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
localmbuffer[m] = d_factorb->values[b * DEFAULT_NFACTORS + m] * d_factorc->values[c * DEFAULT_NFACTORS + m];
//d_hthbuffer[(tileid * DEFAULT_T_TILE_LENGTH + laneid)*DEFAULT_NFACTORS + m] = localmbuffer[m];
}
__syncwarp(mymask);
//reduction in hth
//mytmp: final partial result; myntmp: messages
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
for(int j = 0; j <=m ; j++)
{
mytmp = localmbuffer[m] * localmbuffer[j];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_hbuffer[myfid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + m * DEFAULT_NFACTORS + j]), mytmp);
}
__syncwarp(mymask);
}
}
__syncwarp(mymask);
//reduction in mttkrp
for(int m = 0; m < DEFAULT_NFACTORS; m++)
{
mytmp = localmbuffer[m] * localtbuffer[2];
myntmp = mybit * mytmp;
__syncwarp(mymask);
//now the reduction
for(int i = 0; i < itercounter; i++)
{
mytmp = (__shfl_down_sync(mymask, myntmp, 1, (int)ALS_WARPSIZE)) + (!(mylbit)) * mytmp;
myntmp = mybit * mytmp;
__syncwarp(mymask);
}
if(!mybit)
{
atomicAdd(&(d_factora->values[tmpi * DEFAULT_NFACTORS + m]), mytmp);
}
__syncwarp(mymask);
}
}
}
__syncthreads();
}
/**
* @brief Updates the H (Gram) matrices and prepares the buffers for the inversion and the linear solves
* @version Warp shuffle
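* Note: the call sites of this kernel are commented out in tc_als below, so
* this path appears to be disabled in favour of accumulating H directly inside
* the MTTKRP kernel.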
**/
__global__ void p_hth_update_as(cissbasic_t * d_traina,
double * d_hthbuffer,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
__shared__ double blkmbuffer[((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) * (idx_t)DEFAULT_NFACTORS];
//get block, warp and thread index
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t warpid = tid / ((idx_t)ALS_WARPSIZE);
idx_t laneid = tid % ((idx_t)ALS_WARPSIZE);
idx_t tileid = bid * ((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE) + warpid;
double __align__(256) localhthbuffer[DEFAULT_NFACTORS]={0};
if(tileid < dlength && laneid < DEFAULT_NFACTORS)
{
idx_t dcounter = d_traina->dcounter[tileid+1] - d_traina->dcounter[tileid];
#ifdef ALSAS_DEBUG
if(laneid == 0) printf("my dcounter is %ld\n and my tileid is %ld\n", dcounter, tileid);
#endif
idx_t basicposition = d_traina->dcounter[tileid];
idx_t basicsposition = warpid * DEFAULT_NFACTORS;
for(idx_t i = 0; i < dcounter; i++)
{
double localvalue = d_hthbuffer[(basicposition + i) * DEFAULT_NFACTORS + laneid];
blkmbuffer[basicsposition + laneid] = localvalue;
__syncwarp();
for(idx_t j = 0; j < DEFAULT_NFACTORS; j++)
{
localhthbuffer[j] += localvalue * blkmbuffer[basicsposition + j];
}
}
__syncwarp();
localhthbuffer[laneid] += regularization_index;
for(idx_t i = 0; i < DEFAULT_NFACTORS; i++)
{
d_hbuffer[tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS + laneid * DEFAULT_NFACTORS + i] = localhthbuffer[i];
}
__syncwarp();
//prepare for ptrs
if(laneid == 0)
{
idx_t fid = d_traina->directory[tileid] - 1;
d_factptr[tileid] = d_value_a + fid * DEFAULT_NFACTORS;
d_hbufptr[tileid] = d_hbuffer + tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
}
}
__syncwarp();
}
/**
* @brief Compute the inverse and finish the final update
* @version Now only with coarse grain
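* Note: each thread factorizes its own DEFAULT_NFACTORS x DEFAULT_NFACTORS
* Gram matrix in registers (Cholesky, then forward/backward substitution
* against the identity) and applies the resulting inverse to its factor row.
* This kernel looks unused in tc_als below, which calls cusolver instead.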
*/
__global__ void p_update_als_gpu(cissbasic_t * d_traina,
ordi_matrix * d_factora,
double * d_hbuffer,
idx_t dlength,
double regularization_index
)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
double lv[DEFAULT_NFACTORS * DEFAULT_NFACTORS]={0};
if(tileid < dlength)
{
//compute the inverse
idx_t tmpi = d_traina->directory[tileid];
tmpi--;
double *av = d_hbuffer + basicposition;
idx_t i = 0;
idx_t j = 0;
idx_t k = 0;
for (i = 0; i < DEFAULT_NFACTORS; ++i)
{
for (j = 0; j <= i; ++j)
{
double inner = 0;
for (k = 0; k < j; ++k)
{
inner += lv[k+(i*DEFAULT_NFACTORS)] * lv[k+(j*DEFAULT_NFACTORS)];
}
if(i == j)
{
lv[j+(i*DEFAULT_NFACTORS)] = sqrt(av[i+(i*DEFAULT_NFACTORS)] - inner + regularization_index);
}
else
{
lv[j+(i*DEFAULT_NFACTORS)] = 1.0 / lv[j+(j*DEFAULT_NFACTORS)] * (av[j+(i*DEFAULT_NFACTORS)] - inner);
}
}
}
for(i = 0; i< DEFAULT_NFACTORS * DEFAULT_NFACTORS; i++)
{
av[i] = 0;
}
idx_t n = 0;
for(n=0; n<DEFAULT_NFACTORS; n++) //get identity matrix
{
av[n+(n*DEFAULT_NFACTORS)] = 1.0;
}
//forward solve
i = 1; //define counters outside the loop
j = 0;
idx_t f = 0;
for(j=0; j < DEFAULT_NFACTORS; ++j)
{
av[j] /= lv[0];
}
for(i=1; i < DEFAULT_NFACTORS; ++i)
{
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} L(i,j)X(i,j) */
for(j=0; j < i; ++j)
{
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+(j*DEFAULT_NFACTORS)];
}
}
for(f=0; f <DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
for(i=0; i < DEFAULT_NFACTORS; ++i)
{
for(j=i+1; j < DEFAULT_NFACTORS; ++j)
{
lv[j+(i*DEFAULT_NFACTORS)] = lv[i+(j*DEFAULT_NFACTORS)];
lv[i+(j*DEFAULT_NFACTORS)] = 0.0;
}
}
//backsolve
f = 0; //set counters
j = 0;
idx_t row = 2;
/* last row of X is easy */
for(f=0; f < DEFAULT_NFACTORS; ++f) {
i = DEFAULT_NFACTORS - 1;
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
/* now do backward substitution */
for(row=2; row <= DEFAULT_NFACTORS; ++row)
{
i = DEFAULT_NFACTORS - row;
/* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} R(i,j)X(i,j) */
for( j=i+1; j < DEFAULT_NFACTORS; ++j)
{
for( f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] -= lv[j+(i*DEFAULT_NFACTORS)] * av[f+( j * DEFAULT_NFACTORS )];
}
}
for(f=0; f < DEFAULT_NFACTORS; ++f)
{
av[f+(i*DEFAULT_NFACTORS)] /= lv[i+(i*DEFAULT_NFACTORS)];
}
}
//now do the final update
double * mvals = d_factora->values + tmpi * DEFAULT_NFACTORS;
for(i = 0; i < DEFAULT_NFACTORS; i++)
{
lv[i] = 0;
for(j = 0; j < DEFAULT_NFACTORS; j++)
{
lv[i] += mvals[j] * av[i * DEFAULT_NFACTORS + j];
}
}
//the final transmission
for(i = 0; i < DEFAULT_NFACTORS/2; i++)
{
((double2*)mvals)[i] = ((double2*)lv)[i];
}
}
}
/**
* @brief Update the factor matrix (regularize the Gram matrices and set up the solver pointers)
* @version Now only with coarse grain
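* Note: this kernel only adds the regularization term to the diagonal of each
* Gram matrix and fills the pointer arrays consumed by the batched cusolver
* factorization and solve issued afterwards from the host.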
*/
__global__ void p_update_matrice(cissbasic_t * d_traina,
double * d_value_a,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength,
double regularization_index)
{
idx_t bid = blockIdx.x;
idx_t tid = threadIdx.x;
idx_t tileid = bid * DEFAULT_BLOCKSIZE + tid;
idx_t basicposition = tileid * DEFAULT_NFACTORS * DEFAULT_NFACTORS;
if(tileid < dlength)
{
idx_t tmpi = d_traina->directory[tileid] - 1;
for(idx_t f = 0; f < DEFAULT_NFACTORS; f++)
{
d_hbuffer[basicposition + f*DEFAULT_NFACTORS + f] += regularization_index;
}
d_hbufptr[tileid] = d_hbuffer + basicposition;
d_factptr[tileid] = d_value_a + tmpi * DEFAULT_NFACTORS;
}
}
void p_cholecheck(double * d_factora,
double * d_hbuffer,
double ** d_hbufptr,
double ** d_factptr,
idx_t dlength)
{
}
extern "C"{
/**
* @brief The main function for tensor completion in als
* @param traina, trainb, trainc The training tensor, stored once per mode, for generating the factor matrices
* @param validation The tensor for validation(RMSE)
* @param test The tensor for testing the quality
* @param regularization_index Lambda
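* @param mats The factor matrices being updated (best_mats keeps the best model seen so far)
* Note: one OpenMP thread is spawned per visible GPU and each device works on
* its own partition of the nonzeros (ciss_alloc splits the tensor per device).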
*/
void tc_als(sptensor_t * traina,
sptensor_t * trainb,
sptensor_t * trainc,
sptensor_t * validation,
sptensor_t * test,
ordi_matrix ** mats,
ordi_matrix ** best_mats,
idx_t algorithm_index,
double regularization_index,
double * best_rmse,
double * tolerance,
idx_t * nbadepochs,
idx_t * bestepochs,
idx_t * max_badepochs)
{
idx_t const nmodes = traina->nmodes;
#ifdef CISS_DEBUG
printf("enter the als\n");
#endif
//initialize the devices
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int n;
//print the GPU status
for(n = 0; n < deviceCount; n++)
{
cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop, n);
printf(" %d: %s\n", n, dprop.name);
}
omp_set_num_threads(deviceCount);
//prepare the tensor in TB-COO
ciss_t * h_cissta = ciss_alloc(traina, 1, deviceCount);
ciss_t * h_cisstb = ciss_alloc(trainb, 2, deviceCount);
ciss_t * h_cisstc = ciss_alloc(trainc, 3, deviceCount);
#ifdef MCISS_DEBUG
fprintf(stdout, "the new tensors for mode 0\n");
cissbasic_display(h_cissta->cissunits[0]);
cissbasic_display(h_cissta->cissunits[1]);
#endif
struct timeval start;
struct timeval end;
idx_t diff;
cissbasic_t ** d_traina = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainb = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
cissbasic_t ** d_trainc = (cissbasic_t**)malloc(deviceCount * sizeof(cissbasic_t*));
idx_t ** d_directory_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_directory_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_counter_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_a = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_b = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
idx_t ** d_dims_c = (idx_t**)malloc(deviceCount * sizeof(idx_t*));
double ** d_entries_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_entries_c = (double**)malloc(deviceCount * sizeof(double*));
double ** d_hbuffer = (double**)malloc(deviceCount * sizeof(double*));
//double ** d_hthbuffer = (double**)malloc(deviceCount * sizeof(double*));
int ** d_infoarray = (int**)malloc(deviceCount * sizeof(int*));
double *** d_hbufptr = (double***)malloc(deviceCount * sizeof(double**));
double *** d_factptr = (double***)malloc(deviceCount * sizeof(double**));
ordi_matrix ** d_factora = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorb = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
ordi_matrix ** d_factorc = (ordi_matrix**)malloc(deviceCount * sizeof(ordi_matrix*));
double ** d_value_a = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_b = (double**)malloc(deviceCount * sizeof(double*));
double ** d_value_c = (double**)malloc(deviceCount * sizeof(double*));
idx_t * maxdlength = (idx_t*)malloc(deviceCount * sizeof(idx_t));
idx_t * maxnnz = (idx_t*)malloc(deviceCount * sizeof(idx_t));
cusolverDnHandle_t handle0, handle1;
cudaSetDevice(0);
HANDLE_SOLVERERR(cusolverDnCreate((&handle0)));
cudaSetDevice(1);
HANDLE_SOLVERERR(cusolverDnCreate((&handle1)));
#pragma omp parallel
{
//prepare the threads
unsigned int cpu_thread_id = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
//set gpus
int gpu_id = -1;
cudaSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
cudaGetDevice(&gpu_id);
idx_t * d_itemp1, *d_itemp2, *d_itemp3;
double * d_ftemp;
//initialize the cusolver
//HANDLE_SOLVERERR(cusolverDnCreate((&(handle[gpu_id]))));
//malloc and copy the tensors + matrices to gpu
cissbasic_t * h_traina = h_cissta->cissunits[gpu_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[gpu_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[gpu_id];
//copy tensor for mode-1
HANDLE_ERROR(cudaMalloc((void**)&(d_traina[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_directory_a[gpu_id]), h_traina->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_a[gpu_id]), (h_traina->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_entries_a[gpu_id]), h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_dims_a[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_counter_a[gpu_id], h_traina->dcounter, (h_traina->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_directory_a[gpu_id], h_traina->directory, h_traina->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_a[gpu_id], h_traina->entries, h_traina->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_a[gpu_id], h_traina->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
d_itemp1 = h_traina->directory;
d_itemp2 = h_traina->dims;
d_itemp3 = h_traina->dcounter;
d_ftemp = h_traina->entries;
h_traina->directory = d_directory_a[gpu_id];
h_traina->dims = d_dims_a[gpu_id];
h_traina->entries = d_entries_a[gpu_id];
h_traina->dcounter = d_counter_a[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_traina[gpu_id], h_traina, sizeof(cissbasic_t), cudaMemcpyHostToDevice));
h_traina->directory = d_itemp1;
h_traina->dims = d_itemp2;
h_traina->entries = d_ftemp;
h_traina->dcounter = d_itemp3;
//copy tensor for mode-2
HANDLE_ERROR(cudaMalloc((void**)&(d_trainb[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_directory_b[gpu_id]), h_trainb->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_b[gpu_id]), (h_trainb->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_entries_b[gpu_id]), h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_dims_b[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_directory_b[gpu_id], h_trainb->directory, h_trainb->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_counter_b[gpu_id], h_trainb->dcounter, (h_trainb->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_b[gpu_id], h_trainb->entries, h_trainb->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_b[gpu_id], h_trainb->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
d_itemp1 = h_trainb->directory;
d_itemp2 = h_trainb->dims;
d_itemp3 = h_trainb->dcounter;
d_ftemp = h_trainb->entries;
h_trainb->directory = d_directory_b[gpu_id];
h_trainb->dims = d_dims_b[gpu_id];
h_trainb->entries = d_entries_b[gpu_id];
h_trainb->dcounter = d_counter_b[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_trainb[gpu_id], h_trainb, sizeof(cissbasic_t), cudaMemcpyHostToDevice));
h_trainb->directory = d_itemp1;
h_trainb->dims = d_itemp2;
h_trainb->entries = d_ftemp;
h_trainb->dcounter = d_itemp3;
//copy tensor for mode-3
HANDLE_ERROR(cudaMalloc((void**)&(d_trainc[gpu_id]), sizeof(cissbasic_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_directory_c[gpu_id]), h_trainc->dlength * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_counter_c[gpu_id]), (h_trainc->dlength + 1) * sizeof(idx_t)));
HANDLE_ERROR(cudaMalloc((void**)&(d_entries_c[gpu_id]), h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double)));
HANDLE_ERROR(cudaMalloc((void**)&(d_dims_c[gpu_id]), nmodes * sizeof(idx_t)));
HANDLE_ERROR(cudaMemcpy(d_directory_c[gpu_id], h_trainc->directory, h_trainc->dlength*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_counter_c[gpu_id], h_trainc->dcounter, (h_trainc->dlength + 1)*sizeof(idx_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_entries_c[gpu_id], h_trainc->entries, h_trainc->size * DEFAULT_T_TILE_WIDTH * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_dims_c[gpu_id], h_trainc->dims, nmodes*sizeof(idx_t), cudaMemcpyHostToDevice));
d_itemp1 = h_trainc->directory;
d_itemp2 = h_trainc->dims;
d_ftemp = h_trainc->entries;
d_itemp3 = h_trainc->dcounter;
h_trainc->directory = d_directory_c[gpu_id];
h_trainc->dims = d_dims_c[gpu_id];
h_trainc->entries = d_entries_c[gpu_id];
h_trainc->dcounter = d_counter_c[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_trainc[gpu_id], h_trainc, sizeof(cissbasic_t), cudaMemcpyHostToDevice));
h_trainc->directory = d_itemp1;
h_trainc->dims = d_itemp2;
h_trainc->entries = d_ftemp;
h_trainc->dcounter = d_itemp3;
//buffer for HTH
maxdlength[gpu_id] = SS_MAX(SS_MAX(h_traina->dlength, h_trainb->dlength),h_trainc->dlength);
maxnnz[gpu_id] = SS_MAX(SS_MAX(h_traina->nnz, h_trainb->nnz),h_trainc->nnz);
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d the cpu maxnnz is %ld\n", cpu_thread_id,maxnnz[gpu_id]);
#endif
HANDLE_ERROR(cudaMalloc((void**)&(d_hbuffer[gpu_id]), DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[gpu_id] * sizeof(double)));
//HANDLE_ERROR(cudaMalloc((void**)&(d_hthbuffer[gpu_id]), DEFAULT_NFACTORS * maxnnz[gpu_id] * sizeof(double)));
//HANDLE_ERROR(cudaMalloc((void**)&d_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)));
//buffer for inversion
HANDLE_ERROR(cudaMalloc((void**)&(d_hbufptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(cudaMalloc((void**)&(d_factptr[gpu_id]), maxdlength[gpu_id] * sizeof(double*)));
HANDLE_ERROR(cudaMalloc((void**)&(d_infoarray[gpu_id]), maxdlength[gpu_id] * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&(d_factora[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_a[gpu_id]), mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_a[gpu_id], mats[0]->values, mats[0]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[0]->values;
mats[0]->values = d_value_a[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_factora[gpu_id], mats[0], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[0]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(cudaMalloc((void**)&(d_factorb[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_b[gpu_id]), mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_b[gpu_id], mats[1]->values, mats[1]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[1]->values;
mats[1]->values = d_value_b[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_factorb[gpu_id], mats[1], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[1]->values = d_ftemp;
}
#pragma omp barrier
HANDLE_ERROR(cudaMalloc((void**)&(d_factorc[gpu_id]), sizeof(ordi_matrix)));
HANDLE_ERROR(cudaMalloc((void**)&(d_value_c[gpu_id]), mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_value_c[gpu_id], mats[2]->values, mats[2]->I * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
#pragma omp critical
{
d_ftemp = mats[2]->values;
mats[2]->values = d_value_c[gpu_id];
HANDLE_ERROR(cudaMemcpy(d_factorc[gpu_id], mats[2], sizeof(ordi_matrix), cudaMemcpyHostToDevice));
mats[2]->values = d_ftemp;
}
}
#ifdef CUDA_LOSS //to be done
sptensor_gpu_t * d_test, * d_validate;
#else
double loss = tc_loss_sq(traina, mats, algorithm_index);
double frobsq = tc_frob_sq(nmodes, regularization_index, mats);
#endif
tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, 0, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs);
//step into the kernel
idx_t mode_i, mode_n, m;
cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
for(idx_t e=1; e < DEFAULT_MAX_ITERATE+1; ++e) {
gettimeofday(&start,NULL);
//can set random variables
srand(time(0));
mode_i = rand()%3;
#ifdef ALSAS_DEBUG
mode_i = 0;
fprintf(stdout, "now the mode_i is %d\n", mode_i);
#endif
for(m=0; m < 3; m++) {
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
cudaSetDevice(cpu_thread_id % deviceCount); // "% num_gpus" allows more CPU threads than GPU devices
cusolverDnHandle_t handle;
if(!cpu_thread_id) handle = handle1;
else handle = handle0;
cissbasic_t * h_traina = h_cissta->cissunits[cpu_thread_id];
cissbasic_t * h_trainb = h_cisstb->cissunits[cpu_thread_id];
cissbasic_t * h_trainc = h_cisstc->cissunits[cpu_thread_id];
idx_t mymode_n = (mode_i + m)%3;
idx_t blocknum_u, blocknum_h, nnz, tilenum, blocknum_m;
HANDLE_ERROR(cudaMemset(d_hbuffer[cpu_thread_id], 0, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength[cpu_thread_id] * sizeof(double)));
//HANDLE_ERROR(cudaMemcpy(d_invbuffer, h_invbuffer, DEFAULT_NFACTORS * DEFAULT_NFACTORS * maxdlength * sizeof(double)),cudaMemcpyHostToDevice);
switch (mymode_n)
{
case 0:
{
nnz = h_traina->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d, nnz is %d, blocknum_m is %d, tilenum is %d\n", cpu_thread_id, nnz, blocknum_m, tilenum);
#endif
HANDLE_ERROR(cudaMemset(d_value_a[cpu_thread_id], 0, mats[0]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_traina->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_traina->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
p_mttkrp_gpu_as<<<blocknum_m,DEFAULT_BLOCKSIZE,0>>>(d_traina[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(cudaDeviceSynchronize());
#ifdef ALSAS_DEBUG
fprintf(stdout, "now in thread %d ends mttkrp\n", cpu_thread_id);
fprintf(stdout, "now in thread %d the blocknum for hth update is %ld and the dlength is %ld\n", cpu_thread_id, blocknum_h, h_traina->dlength);
#endif
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_traina[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
p_update_matrice<<<blocknum_u, DEFAULT_BLOCKSIZE, 0>>>(d_traina[cpu_thread_id], d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
#ifdef ALS_DEBUG
p_cholecheck(d_value_a[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_traina->dlength);
#endif
HANDLE_SOLVERERR(cusolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_SOLVERERR(cusolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_traina->dlength));
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(cudaMemcpy(mats[0]->values + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, d_value_a[cpu_thread_id] + (h_cissta->d_ref[cpu_thread_id] -1) * DEFAULT_NFACTORS, (h_cissta->d_ref[cpu_thread_id + 1] - h_cissta->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(d_value_a[cpu_thread_id] + (h_cissta->d_ref[(cpu_thread_id + 1)% deviceCount] - 1) * DEFAULT_NFACTORS, mats[0]->values + (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount] -1 ) * DEFAULT_NFACTORS, (h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cissta->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
break;
}
case 1:
{
nnz = h_trainb->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(cudaMemset(d_value_b[cpu_thread_id], 0, mats[1]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainb->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainb->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
p_mttkrp_gpu_as<<<blocknum_m,DEFAULT_BLOCKSIZE,0>>>(d_trainb[cpu_thread_id], d_factorb[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(cudaDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainb[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
p_update_matrice<<<blocknum_u, DEFAULT_BLOCKSIZE, 0>>>(d_trainb[cpu_thread_id], d_value_b[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainb->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_SOLVERERR(cusolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_SOLVERERR(cusolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainb->dlength));
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(cudaMemcpy(mats[1]->values + (h_cisstb->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, d_value_b[cpu_thread_id] + (h_cisstb->d_ref[cpu_thread_id] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[cpu_thread_id + 1] - h_cisstb->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(d_value_b[cpu_thread_id] + (h_cisstb->d_ref[(cpu_thread_id + 1)% deviceCount] - 1)* DEFAULT_NFACTORS, mats[1]->values + (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount] - 1)* DEFAULT_NFACTORS, (h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstb->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
break;
}
default:
{
nnz = h_trainc->nnz;
tilenum = nnz/DEFAULT_T_TILE_LENGTH + 1;
blocknum_m = tilenum/(((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
HANDLE_ERROR(cudaMemset(d_value_c[cpu_thread_id], 0, mats[2]->I * DEFAULT_NFACTORS * sizeof(double)));
blocknum_u = h_trainc->dlength / DEFAULT_BLOCKSIZE + 1;
blocknum_h = h_trainc->dlength / (((idx_t)DEFAULT_BLOCKSIZE)/((idx_t)ALS_WARPSIZE)) + 1;
p_mttkrp_gpu_as<<<blocknum_m,DEFAULT_BLOCKSIZE,0>>>(d_trainc[cpu_thread_id], d_factorc[cpu_thread_id], d_factora[cpu_thread_id], d_factorb[cpu_thread_id], d_hbuffer[cpu_thread_id], tilenum);
HANDLE_ERROR(cudaDeviceSynchronize());
//p_hth_update_as<<<blocknum_h,DEFAULT_BLOCKSIZE,0>>>(d_trainc[cpu_thread_id], d_hthbuffer[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
p_update_matrice<<<blocknum_u, DEFAULT_BLOCKSIZE, 0>>>(d_trainc[cpu_thread_id], d_value_c[cpu_thread_id], d_hbuffer[cpu_thread_id], d_hbufptr[cpu_thread_id], d_factptr[cpu_thread_id], h_trainc->dlength, regularization_index);
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_SOLVERERR(cusolverDnDpotrfBatched(handle, uplo, DEFAULT_NFACTORS, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_SOLVERERR(cusolverDnDpotrsBatched(handle, uplo, DEFAULT_NFACTORS, 1, d_hbufptr[cpu_thread_id], DEFAULT_NFACTORS, d_factptr[cpu_thread_id],DEFAULT_NFACTORS, d_infoarray[cpu_thread_id], (int)h_trainc->dlength));
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#pragma omp barrier
//update the final results
HANDLE_ERROR(cudaMemcpy(mats[2]->values + (h_cisstc->d_ref[cpu_thread_id] -1 ) * DEFAULT_NFACTORS, d_value_c[cpu_thread_id] + (h_cisstc->d_ref[cpu_thread_id] - 1) * DEFAULT_NFACTORS, (h_cisstc->d_ref[cpu_thread_id + 1] - h_cisstc->d_ref[cpu_thread_id]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaMemcpy(d_value_c[cpu_thread_id] + (h_cisstc->d_ref[(cpu_thread_id + 1)% deviceCount] -1) * DEFAULT_NFACTORS, mats[2]->values + (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount] -1)* DEFAULT_NFACTORS, (h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount + 1] - h_cisstc->d_ref[(cpu_thread_id + 1) % deviceCount]) * DEFAULT_NFACTORS * sizeof(double), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
break;
}
//p_update_als(train, mats, m, DEFAULT_NFACTORS, regularization_index);
}
}
}
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
#ifdef DEBUG
matrix_display(mats[0]);
matrix_display(mats[1]);
matrix_display(mats[2]);
#endif
gettimeofday(&end,NULL);
diff = 1000000*(end.tv_sec-start.tv_sec) + end.tv_usec - start.tv_usec;
printf("this time cost %ld\n",diff);
/* compute new obj value, print stats, and exit if converged */
loss = tc_loss_sq(traina, mats, algorithm_index);
frobsq = tc_frob_sq(nmodes, regularization_index, mats);
if(tc_converge(traina, validation, mats, best_mats, algorithm_index, loss, frobsq, e, nmodes, best_rmse, tolerance, nbadepochs, bestepochs, max_badepochs)) {
break;
}
} /* foreach iteration */
cudaSetDevice(0);
HANDLE_SOLVERERR(cusolverDnDestroy(handle0));
cudaSetDevice(1);
HANDLE_SOLVERERR(cusolverDnDestroy(handle1));
#pragma omp parallel
{
unsigned int cpu_thread_id = omp_get_thread_num();
cudaSetDevice(cpu_thread_id % deviceCount);
//end the cusolver
//HANDLE_SOLVERERR(cusolverDnDestroy(handle));
//free the cudabuffer
cudaFree(d_counter_a[cpu_thread_id]);
cudaFree(d_directory_a[cpu_thread_id]);
cudaFree(d_dims_a[cpu_thread_id]);
cudaFree(d_entries_a[cpu_thread_id]);
cudaFree(d_counter_b[cpu_thread_id]);
cudaFree(d_directory_b[cpu_thread_id]);
cudaFree(d_dims_b[cpu_thread_id]);
cudaFree(d_entries_b[cpu_thread_id]);
cudaFree(d_counter_c[cpu_thread_id]);
cudaFree(d_directory_c[cpu_thread_id]);
cudaFree(d_dims_c[cpu_thread_id]);
cudaFree(d_entries_c[cpu_thread_id]);
cudaFree(d_hbuffer[cpu_thread_id]);
cudaFree(d_hbufptr[cpu_thread_id]);
//cudaFree(d_hthbuffer[cpu_thread_id]);
cudaFree(d_factptr[cpu_thread_id]);
cudaFree(d_infoarray[cpu_thread_id]);
cudaFree(d_value_a[cpu_thread_id]);
cudaFree(d_value_b[cpu_thread_id]);
cudaFree(d_value_c[cpu_thread_id]);
cudaFree(d_traina[cpu_thread_id]);
cudaFree(d_trainb[cpu_thread_id]);
cudaFree(d_trainc[cpu_thread_id]);
cudaFree(d_factora[cpu_thread_id]);
cudaFree(d_factorb[cpu_thread_id]);
cudaFree(d_factorc[cpu_thread_id]);
//cudaFree(d_hthbuffer[cpu_thread_id]);
cudaDeviceReset();
}
ciss_free(h_cissta, deviceCount);
ciss_free(h_cisstb, deviceCount);
ciss_free(h_cisstc, deviceCount);
free(d_traina);
free(d_trainb);
free(d_trainc);
free(d_directory_a);
free(d_directory_b);
free(d_directory_c);
free(d_counter_a);
free(d_counter_b);
free(d_counter_c);
free(d_dims_a);
free(d_dims_b);
free(d_dims_c);
free(d_entries_a);
free(d_entries_b);
free(d_entries_c);
free(d_hbuffer);
//free(d_hthbuffer);
free(d_hbufptr);
free(d_infoarray);
free(d_factptr);
//free(handle);
free(d_factora);
free(d_factorb);
free(d_factorc);
free(d_value_a);
free(d_value_b);
free(d_value_c);
free(maxdlength);
free(maxnnz);
}
}
|
4a00431b07d36df2bb86f22ecccaea715cc932e4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "hello_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
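// Sketch of the harness below: for every matrix size and block shape above,
// the problem size is rounded up to a multiple of the block, hipFree(0) forces
// context creation, the kernel is warmed up with 10 launches, then timed over
// 1000 launches with std::chrono::steady_clock and reported in microseconds.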
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
hello_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
hello_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
hello_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4a00431b07d36df2bb86f22ecccaea715cc932e4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "hello_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
hello_kernel<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hello_kernel<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hello_kernel<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3209e1fc49c07c456605edba47f4891429c7f396.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zpotf2.cu normal z -> d, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#define PRECISION_d
//#if (GPUSHMEM < 200)
#define ddot_max_bs 512 // 512 is max threads for 1.x cards
//#else
//#define ddot_max_bs 1024
//#endif
void dpotf2_dscal(magma_int_t n, double *x, magma_int_t incx);
void dpotf2_ddot(magma_int_t n, double *x, magma_int_t incx);
#if defined(PRECISION_z) || defined(PRECISION_c)
void dlacgv(magma_int_t n, double *x, magma_int_t incx);
#endif
/**
Purpose
-------
dpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_dposv_aux
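    Example (illustrative sketch, not part of the original documentation):
    assuming dA already holds an n x n SPD matrix on the device with leading
    dimension ldda, a call could look like

        magma_int_t info;
        magma_dpotf2_gpu( MagmaLower, n, dA, ldda, &info );
        if (info > 0) printf( "leading minor %d is not positive definite\n", (int) info );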
********************************************************************/
extern "C" magma_int_t
magma_dpotf2_gpu(
magma_uplo_t uplo, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t j;
*info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
*info = -1;
} else if (n < 0 || n > ddot_max_bs) {
*info = -2;
} else if (ldda < max(1,n)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (n == 0) {
return *info;
}
double alpha = MAGMA_D_NEG_ONE;
double beta = MAGMA_D_ONE;
if (uplo == MagmaUpper) {
for(j = 0; j < n; j++) {
dpotf2_ddot(j, dA(0,j), 1); // including ddot product and update a(j,j)
if (j < n) {
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(0, j), 1);
#endif
magma_dgemv( MagmaTrans, j, n-j-1,
alpha, dA(0, j+1), ldda,
dA(0, j), 1,
beta, dA(j, j+1), ldda);
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(0, j), 1);
#endif
dpotf2_dscal(n-j, dA(j,j), ldda);
}
}
}
else {
for(j = 0; j < n; j++) {
dpotf2_ddot(j, dA(j,0), ldda); // including ddot product and update a(j,j)
if (j < n) {
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(j, 0), ldda);
#endif
magma_dgemv( MagmaNoTrans, n-j-1, j,
alpha, dA(j+1, 0), ldda,
dA(j,0), ldda,
beta, dA(j+1, j), 1 );
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(j, 0), ldda);
#endif
dpotf2_dscal(n-j, dA(j,j), 1);
}
}
}
return *info;
}
#define dscal_bs 32
#define ddot_bs 512
#define dlacgv_bs 512
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
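// Note on kernel_ddot below: the shared-memory tree reduction stops once 64
// partial sums remain; the last 64 are combined by a warp-synchronous unrolled
// sequence over a volatile pointer, which assumes implicit warp synchronization
// (a pre-Volta idiom). Thread 0 then writes sqrt(a(j,j) - sum) back in place.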
extern __shared__ double shared_data[];
__global__ void kernel_ddot(int n, double *x, int incx, int threadSize)
{
int tx = threadIdx.x;
double *sdata = shared_data;
double res = MAGMA_D_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_D_REAL(res * MAGMA_D_CNJG(res));
__syncthreads();
for(int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile double* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
double xreal = MAGMA_D_REAL(x[n*incx]);
x[n*incx] = MAGMA_D_MAKE( sqrt(xreal - sdata[0]), 0 );
}
}
void dpotf2_ddot(magma_int_t n, double *x, magma_int_t incx)
{
/*
Specialized Ddot
1) performs ddot sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > ddot_max_bs) {
fprintf( stderr, "n = %d > %d is not supported in dpotf2_ddot\n", (int) n, (int) ddot_max_bs);
return;
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
hipLaunchKernelGGL(( kernel_ddot), dim3(1), dim3(threadSize), threadSize * sizeof(double), magma_stream, n, x, incx, threadSize);
}
__global__ void kernel_dscal(int n, double *x, int incx)
{
int id = blockIdx.x * dscal_bs + threadIdx.x;
__shared__ double factor;
if (threadIdx.x == 0) {
factor = MAGMA_D_MAKE(1.0/MAGMA_D_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id >0) {
x[id*incx] = x[id*incx] * factor;
}
}
void dpotf2_dscal(magma_int_t n, double *x, magma_int_t incx)
{
/*
Specialized Dscal: performs x[1:n-1] /= x[0]
*/
dim3 threads(dscal_bs, 1, 1);
int num_blocks = (n - 1)/dscal_bs + 1;
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_dscal), dim3(grid), dim3(threads), 0, magma_stream , n, x, incx);
}
#if defined(PRECISION_z) || defined(PRECISION_c)
__global__ void kernel_dlacgv(int n, double *x, int incx)
{
int id = blockIdx.x * dlacgv_bs + threadIdx.x;
if ( id < n ) {
x[id*incx] = MAGMA_D_CNJG(x[id*incx]);
}
}
/**
Purpose
-------
DLACGV conjugates a real vector of length N.
Arguments
---------
@param[in]
n INTEGER
The length of the vector X. N >= 0.
@param[in,out]
x DOUBLE PRECISION array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
@param[in]
incx INTEGER
The spacing between successive elements of X.
@ingroup magma_dposv_aux
********************************************************************/
void dlacgv(magma_int_t n, double *x, magma_int_t incx)
{
dim3 threads(dlacgv_bs, 1, 1);
int num_blocks = (n - 1)/dlacgv_bs + 1;
dim3 grid(num_blocks,1);
hipLaunchKernelGGL(( kernel_dlacgv), dim3(grid), dim3(threads), 0, magma_stream , n, x, incx);
}
#endif // defined(PRECISION_z) || defined(PRECISION_c)
| 3209e1fc49c07c456605edba47f4891429c7f396.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zpotf2.cu normal z -> d, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#define PRECISION_d
//#if (GPUSHMEM < 200)
#define ddot_max_bs 512 // 512 is max threads for 1.x cards
//#else
//#define ddot_max_bs 1024
//#endif
void dpotf2_dscal(magma_int_t n, double *x, magma_int_t incx);
void dpotf2_ddot(magma_int_t n, double *x, magma_int_t incx);
#if defined(PRECISION_z) || defined(PRECISION_c)
void dlacgv(magma_int_t n, double *x, magma_int_t incx);
#endif
/**
Purpose
-------
dpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA DOUBLE_PRECISION array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_dposv_aux
********************************************************************/
extern "C" magma_int_t
magma_dpotf2_gpu(
magma_uplo_t uplo, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t j;
*info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
*info = -1;
} else if (n < 0 || n > ddot_max_bs) {
*info = -2;
} else if (ldda < max(1,n)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (n == 0) {
return *info;
}
double alpha = MAGMA_D_NEG_ONE;
double beta = MAGMA_D_ONE;
if (uplo == MagmaUpper) {
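// Upper case: for each column j, form a(j,j) from the column above the diagonal, update row j of U with a GEMV, then scale that row by 1/a(j,j).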
for(j = 0; j < n; j++) {
dpotf2_ddot(j, dA(0,j), 1); // including ddot product and update a(j,j)
if (j < n) {
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(0, j), 1);
#endif
magma_dgemv( MagmaTrans, j, n-j-1,
alpha, dA(0, j+1), ldda,
dA(0, j), 1,
beta, dA(j, j+1), ldda);
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(0, j), 1);
#endif
dpotf2_dscal(n-j, dA(j,j), ldda);
}
}
}
else {
for(j = 0; j < n; j++) {
dpotf2_ddot(j, dA(j,0), ldda); // including ddot product and update a(j,j)
if (j < n) {
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(j, 0), ldda);
#endif
magma_dgemv( MagmaNoTrans, n-j-1, j,
alpha, dA(j+1, 0), ldda,
dA(j,0), ldda,
beta, dA(j+1, j), 1 );
#if defined(PRECISION_z) || defined(PRECISION_c)
dlacgv(j, dA(j, 0), ldda);
#endif
dpotf2_dscal(n-j, dA(j,j), 1);
}
}
}
return *info;
}
#define dscal_bs 32
#define ddot_bs 512
#define dlacgv_bs 512
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];
__global__ void kernel_ddot(int n, double *x, int incx, int threadSize)
{
int tx = threadIdx.x;
double *sdata = shared_data;
double res = MAGMA_D_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_D_REAL(res * MAGMA_D_CNJG(res));
__syncthreads();
for(int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if (tx < 32) {
volatile double* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
double xreal = MAGMA_D_REAL(x[n*incx]);
x[n*incx] = MAGMA_D_MAKE( sqrt(xreal - sdata[0]), 0 );
}
}
void dpotf2_ddot(magma_int_t n, double *x, magma_int_t incx)
{
/*
Specialized Ddot
1) performs ddot sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > ddot_max_bs) {
fprintf( stderr, "n = %d > %d is not supported in dpotf2_ddot\n", (int) n, (int) ddot_max_bs);
return;
}
int threadSize;
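// Pick the smallest power-of-two block size (>= 64) that covers the n elements being reduced.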
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
kernel_ddot<<< 1, threadSize, threadSize * sizeof(double), magma_stream>>> (n, x, incx, threadSize);
}
__global__ void kernel_dscal(int n, double *x, int incx)
{
int id = blockIdx.x * dscal_bs + threadIdx.x;
__shared__ double factor;
if (threadIdx.x == 0) {
factor = MAGMA_D_MAKE(1.0/MAGMA_D_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id >0) {
x[id*incx] = x[id*incx] * factor;
}
}
void dpotf2_dscal(magma_int_t n, double *x, magma_int_t incx)
{
/*
Specialized Dscal: performs x[1:n-1] /= x[0]
*/
dim3 threads(dscal_bs, 1, 1);
int num_blocks = (n - 1)/dscal_bs + 1;
dim3 grid(num_blocks,1);
kernel_dscal<<< grid, threads, 0, magma_stream >>> (n, x, incx);
}
#if defined(PRECISION_z) || defined(PRECISION_c)
__global__ void kernel_dlacgv(int n, double *x, int incx)
{
int id = blockIdx.x * dlacgv_bs + threadIdx.x;
if ( id < n ) {
x[id*incx] = MAGMA_D_CNJG(x[id*incx]);
}
}
/**
Purpose
-------
DLACGV conjugates a real vector of length N.
Arguments
---------
@param[in]
n INTEGER
The length of the vector X. N >= 0.
@param[in,out]
x DOUBLE PRECISION array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
@param[in]
incx INTEGER
The spacing between successive elements of X.
@ingroup magma_dposv_aux
********************************************************************/
void dlacgv(magma_int_t n, double *x, magma_int_t incx)
{
dim3 threads(dlacgv_bs, 1, 1);
int num_blocks = (n - 1)/dlacgv_bs + 1;
dim3 grid(num_blocks,1);
kernel_dlacgv<<< grid, threads, 0, magma_stream >>> (n, x, incx);
}
#endif // defined(PRECISION_z) || defined(PRECISION_c)
|
4e2e441d9dba4009789921ee4749ed32acf5d709.hip | // !!! This is a file automatically generated by hipify!!!
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_detection_output_layer_acc.h"
#include "tnn/device/cuda/acc/cuda_detection_output_layer_acc_kernel.cuh"
#include "tnn/utils/dims_vector_utils.h"
#include "tnn/utils/bbox_util.h"
namespace TNN_NS {
inline CodeType GetCodeType(const int number) {
ASSERT(number > 0 && number < 4);
switch (number) {
case 1: {
return PriorBoxParameter_CodeType_CORNER;
}
case 2: {
return PriorBoxParameter_CodeType_CENTER_SIZE;
}
default: {
return PriorBoxParameter_CodeType_CORNER_SIZE;
}
}
}
Status CudaDetectionOutputLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
CudaLayerAcc::Init(context, param, resource, inputs, outputs);
DetectionOutputLayerParam *params = dynamic_cast<DetectionOutputLayerParam *>(param_);
if (!params) {
LOGE("Error: DetectionOutputLayerParam is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: DetectionOutputLayerParam is nil");
}
int num = inputs[0]->GetBlobDesc().dims[0];
num_priors = inputs[2]->GetBlobDesc().dims[2] / 4;
num_loc_classes = params->share_location ? 1 : params->num_classes;
top_k = ::min(params->nms_param.top_k, num_priors);
int num_overlaps = top_k * (top_k - 1) / 2;
if (params->keep_top_k > 0) {
max_top_k = num * params->keep_top_k;
} else {
max_top_k = num * 256;
}
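// Workspace buffers: decoded bboxes (buf 0), final kept detections (buf 1), per-class score/index/overlap scratch used by NMS, and temporary storage (last buf).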
CreateTempBuf(num * num_loc_classes * num_priors * 4 * sizeof(float));
CreateTempBuf(num * params->keep_top_k * 7 * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(int));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(int));
CreateTempBuf(num * params->num_classes * num_overlaps * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(bool));
CreateTempBuf(num * params->num_classes * sizeof(int));
CreateTempBuf(num * params->num_classes * top_k * sizeof(float));
CreateTempBuf(num * params->num_classes * top_k * sizeof(int));
CreateTempBuf(num * params->keep_top_k * sizeof(float));
CreateTempBuf(num * params->keep_top_k * sizeof(float));
CreateTempBuf(num * sizeof(int));
temp_storage_bytes = 32 * 1024 * 1024 + 256;
CreateTempBuf(temp_storage_bytes);
return TNN_OK;
}
Status CudaDetectionOutputLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaDetectionOutputLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Blob *input_blob1 = inputs[0];
Blob *input_blob2 = inputs[1];
Blob *input_blob3 = inputs[2];
Blob *output_blob = outputs[0];
float* loc_data_d = static_cast<float*>(input_blob1->GetHandle().base);
float* conf_data_d = static_cast<float*>(input_blob2->GetHandle().base);
float* prior_data_d = static_cast<float*>(input_blob3->GetHandle().base);
int num = input_blob1->GetBlobDesc().dims[0];
DetectionOutputLayerParam *params = dynamic_cast<DetectionOutputLayerParam *>(param_);
CodeType code_type = GetCodeType(params->code_type);
decode_bboxes_all_launcher(loc_data_d, prior_data_d, num, num_priors, num_loc_classes,
params->background_label_id, code_type, params->share_location, params->variance_encoded_in_target,
false, false, nullptr, (float*)tempbufs_[0].ptr, context_->GetStream());
int *all_out_size = new int[num];
int num_kept = 0;
NMSFast((float*)tempbufs_[0].ptr, conf_data_d, num, params->num_classes, num_loc_classes, num_priors,
params->background_label_id, params->share_location, params->keep_top_k, top_k, params->confidence_threshold,
params->nms_param.nms_threshold, 1.001f, params->eta, false, nullptr, 0, (float*)tempbufs_[14].ptr,
temp_storage_bytes, tempbufs_, (float*)tempbufs_[1].ptr, all_out_size, &num_kept, context_->GetStream());
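// Result layout is [1, 1, num_kept, 7]; when NMS keeps nothing, fall back to one placeholder row per image.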
std::vector<int> top_shape(2, 1);
top_shape.push_back(num_kept);
top_shape.push_back(7);
if (num_kept == 0) {
top_shape[2] = num;
}
output_blob->GetBlobDesc().dims[2] = top_shape[2];
float* top_data_d = static_cast<float*>(output_blob->GetHandle().base);
if (num_kept == 0) {
int out_size = DimsVectorUtils::Count(output_blob->GetBlobDesc().dims);
float *top_data = new float[out_size];
for (int vi = 0; vi < out_size; vi++) {
top_data[vi] = -1;
}
for (int i = 0; i < num; ++i) {
top_data[i * 7 + 0] = i;
}
CUDA_CHECK(hipMemcpyAsync(top_data_d, top_data, out_size * sizeof(float), hipMemcpyHostToDevice, context_->GetStream()));
//TODO (johnzlli) need refactor
CUDA_CHECK(hipStreamSynchronize(context_->GetStream()));
delete [] top_data;
} else {
CUDA_CHECK(hipMemcpyAsync(top_data_d, tempbufs_[1].ptr, num_kept * 7 * sizeof(float), hipMemcpyDeviceToDevice,
context_->GetStream()));
}
delete [] all_out_size;
return TNN_OK;
}
REGISTER_CUDA_ACC(DetectionOutput, LAYER_DETECTION_OUTPUT);
} // namespace TNN_NS
| 4e2e441d9dba4009789921ee4749ed32acf5d709.cu | // Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_detection_output_layer_acc.h"
#include "tnn/device/cuda/acc/cuda_detection_output_layer_acc_kernel.cuh"
#include "tnn/utils/dims_vector_utils.h"
#include "tnn/utils/bbox_util.h"
namespace TNN_NS {
inline CodeType GetCodeType(const int number) {
ASSERT(number > 0 && number < 4);
switch (number) {
case 1: {
return PriorBoxParameter_CodeType_CORNER;
}
case 2: {
return PriorBoxParameter_CodeType_CENTER_SIZE;
}
default: {
return PriorBoxParameter_CodeType_CORNER_SIZE;
}
}
}
Status CudaDetectionOutputLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
CudaLayerAcc::Init(context, param, resource, inputs, outputs);
DetectionOutputLayerParam *params = dynamic_cast<DetectionOutputLayerParam *>(param_);
if (!params) {
LOGE("Error: DetectionOutputLayerParam is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: DetectionOutputLayerParam is nil");
}
int num = inputs[0]->GetBlobDesc().dims[0];
num_priors = inputs[2]->GetBlobDesc().dims[2] / 4;
num_loc_classes = params->share_location ? 1 : params->num_classes;
top_k = std::min(params->nms_param.top_k, num_priors);
int num_overlaps = top_k * (top_k - 1) / 2;
if (params->keep_top_k > 0) {
max_top_k = num * params->keep_top_k;
} else {
max_top_k = num * 256;
}
CreateTempBuf(num * num_loc_classes * num_priors * 4 * sizeof(float));
CreateTempBuf(num * params->keep_top_k * 7 * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(int));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(int));
CreateTempBuf(num * params->num_classes * num_overlaps * sizeof(float));
CreateTempBuf(num * params->num_classes * num_priors * sizeof(bool));
CreateTempBuf(num * params->num_classes * sizeof(int));
CreateTempBuf(num * params->num_classes * top_k * sizeof(float));
CreateTempBuf(num * params->num_classes * top_k * sizeof(int));
CreateTempBuf(num * params->keep_top_k * sizeof(float));
CreateTempBuf(num * params->keep_top_k * sizeof(float));
CreateTempBuf(num * sizeof(int));
temp_storage_bytes = 32 * 1024 * 1024 + 256;
CreateTempBuf(temp_storage_bytes);
return TNN_OK;
}
Status CudaDetectionOutputLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaDetectionOutputLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Blob *input_blob1 = inputs[0];
Blob *input_blob2 = inputs[1];
Blob *input_blob3 = inputs[2];
Blob *output_blob = outputs[0];
float* loc_data_d = static_cast<float*>(input_blob1->GetHandle().base);
float* conf_data_d = static_cast<float*>(input_blob2->GetHandle().base);
float* prior_data_d = static_cast<float*>(input_blob3->GetHandle().base);
int num = input_blob1->GetBlobDesc().dims[0];
DetectionOutputLayerParam *params = dynamic_cast<DetectionOutputLayerParam *>(param_);
CodeType code_type = GetCodeType(params->code_type);
decode_bboxes_all_launcher(loc_data_d, prior_data_d, num, num_priors, num_loc_classes,
params->background_label_id, code_type, params->share_location, params->variance_encoded_in_target,
false, false, nullptr, (float*)tempbufs_[0].ptr, context_->GetStream());
int *all_out_size = new int[num];
int num_kept = 0;
NMSFast((float*)tempbufs_[0].ptr, conf_data_d, num, params->num_classes, num_loc_classes, num_priors,
params->background_label_id, params->share_location, params->keep_top_k, top_k, params->confidence_threshold,
params->nms_param.nms_threshold, 1.001f, params->eta, false, nullptr, 0, (float*)tempbufs_[14].ptr,
temp_storage_bytes, tempbufs_, (float*)tempbufs_[1].ptr, all_out_size, &num_kept, context_->GetStream());
std::vector<int> top_shape(2, 1);
top_shape.push_back(num_kept);
top_shape.push_back(7);
if (num_kept == 0) {
top_shape[2] = num;
}
output_blob->GetBlobDesc().dims[2] = top_shape[2];
float* top_data_d = static_cast<float*>(output_blob->GetHandle().base);
if (num_kept == 0) {
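// No detections: fill the output with -1 and store the image index in the first field of each row.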
int out_size = DimsVectorUtils::Count(output_blob->GetBlobDesc().dims);
float *top_data = new float[out_size];
for (int vi = 0; vi < out_size; vi++) {
top_data[vi] = -1;
}
for (int i = 0; i < num; ++i) {
top_data[i * 7 + 0] = i;
}
CUDA_CHECK(cudaMemcpyAsync(top_data_d, top_data, out_size * sizeof(float), cudaMemcpyHostToDevice, context_->GetStream()));
//TODO (johnzlli) need refactor
CUDA_CHECK(cudaStreamSynchronize(context_->GetStream()));
delete [] top_data;
} else {
CUDA_CHECK(cudaMemcpyAsync(top_data_d, tempbufs_[1].ptr, num_kept * 7 * sizeof(float), cudaMemcpyDeviceToDevice,
context_->GetStream()));
}
delete [] all_out_size;
return TNN_OK;
}
REGISTER_CUDA_ACC(DetectionOutput, LAYER_DETECTION_OUTPUT);
} // namespace TNN_NS
|
a985cc0782c6328b8c9a67565ce28c9341ba353b.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include "macros.h"
#include "cuda_utils.h"
#include "pscan.h"
#include "helper_math.h"
static int edgeTable[256] = {
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
};
static int triTable[256][16] = {
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
};
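// Interpolate the isosurface crossing along the edge p1-p2; near-degenerate cases fall back to an endpoint.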
__device__ float3 vertexInterp(float isolevel, float3 p1, float3 p2, float valp1, float valp2) {
float mu;
float3 p;
if (abs(isolevel - valp1) < 0.0001f) {
return p1;
}
if (abs(isolevel - valp2) < 0.0001f) {
return p2;
}
if (abs(valp1 - valp2) < 0.0001f) {
return p1;
}
mu = (isolevel - valp1) / (valp2 - valp1);
p.x = p1.x + mu * (p2.x - p1.x);
p.y = p1.y + mu * (p2.y - p1.y);
p.z = p1.z + mu * (p2.z - p1.z);
return p;
}
__global__ void mcubes_cuda_kernel(
const torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> vol,
float3 *vertices,
int *ntris_in_cells,
int3 nGrids,
float threshold,
const torch::PackedTensorAccessor32<int, 1, torch::RestrictPtrTraits> edgeTable,
const torch::PackedTensorAccessor32<int, 2, torch::RestrictPtrTraits> triTable) {
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
const int iz = blockIdx.z * blockDim.z + threadIdx.z;
if (ix >= nGrids.x - 1 || iy >= nGrids.y - 1 || iz >= nGrids.z - 1) {
return;
}
int cubeindex = 0;
int bits = 0;
float val[8];
float3 p[8];
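// indexTable maps the (i,j,k) sampling order below onto the standard marching-cubes corner numbering.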
int indexTable[8] {
0, 1, 4, 5, 3, 2, 7, 6
};
for (int k = 0; k <= 1; k++) {
for (int j = 0; j <= 1; j++) {
for (int i = 0; i <= 1; i++) {
const int jx = ix + i;
const int jy = iy + j;
const int jz = iz + k;
if (vol[jz][jy][jx] < threshold) {
cubeindex |= 0x01 << indexTable[bits];
}
val[indexTable[bits]] = vol[jz][jy][jx];
p[indexTable[bits]] = make_float3(jx, jy, jz);
bits += 1;
}
}
}
/* Find the vertices where the surface intersects the cube */
float3 vertlist[12];
if ((edgeTable[cubeindex] & 1) != 0) {
vertlist[0] = vertexInterp(threshold, p[0], p[1], val[0], val[1]);
}
if ((edgeTable[cubeindex] & 2) != 0) {
vertlist[1] = vertexInterp(threshold, p[1], p[2], val[1], val[2]);
}
if ((edgeTable[cubeindex] & 4) != 0) {
vertlist[2] = vertexInterp(threshold, p[2], p[3], val[2], val[3]);
}
if ((edgeTable[cubeindex] & 8) != 0) {
vertlist[3] = vertexInterp(threshold, p[3], p[0], val[3], val[0]);
}
if ((edgeTable[cubeindex] & 16) != 0) {
vertlist[4] = vertexInterp(threshold, p[4], p[5], val[4], val[5]);
}
if ((edgeTable[cubeindex] & 32) != 0) {
vertlist[5] = vertexInterp(threshold, p[5], p[6], val[5], val[6]);
}
if ((edgeTable[cubeindex] & 64) != 0) {
vertlist[6] = vertexInterp(threshold, p[6], p[7], val[6], val[7]);
}
if ((edgeTable[cubeindex] & 128) != 0) {
vertlist[7] = vertexInterp(threshold, p[7], p[4], val[7], val[4]);
}
if ((edgeTable[cubeindex] & 256) != 0) {
vertlist[8] = vertexInterp(threshold, p[0], p[4], val[0], val[4]);
}
if ((edgeTable[cubeindex] & 512) != 0) {
vertlist[9] = vertexInterp(threshold, p[1], p[5], val[1], val[5]);
}
if ((edgeTable[cubeindex] & 1024) != 0) {
vertlist[10] = vertexInterp(threshold, p[2], p[6], val[2], val[6]);
}
if ((edgeTable[cubeindex] & 2048) != 0) {
vertlist[11] = vertexInterp(threshold, p[3], p[7], val[3], val[7]);
}
const int id = (iz * nGrids.y + iy) * nGrids.x + ix;
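// Each cell writes up to 4 triangles into its fixed 12-vertex slot; unused entries are zero-filled and skipped later via the per-cell triangle count.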
for (int i = 0; i < 4; i++) {
if (triTable[cubeindex][i * 3 + 0] >= 0) {
float3 tri[3];
for (int k = 0; k < 3; k++) {
tri[k] = vertlist[triTable[cubeindex][i * 3 + k]];
}
for (int k = 0; k < 3; k++) {
vertices[id * 12 + i * 3 + k] = tri[k];
}
ntris_in_cells[id] += 1;
} else {
for (int k = 0; k < 3; k++) {
vertices[id * 12 + i * 3 + k] = make_float3(0.0, 0.0, 0.0);
}
}
}
}
__global__ void compaction(
float3 *vertices,
int *ntris,
int *offsets,
int3 nGrids,
torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> verts,
torch::PackedTensorAccessor32<int, 2, torch::RestrictPtrTraits> faces) {
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
const int iz = blockIdx.z * blockDim.z + threadIdx.z;
const int index = (iz * nGrids.y + iy) * nGrids.x + ix;
const int size = nGrids.x * nGrids.y * nGrids.z;
if (index < size) {
const int start = offsets[index];
const int n = ntris[index];
for (int i = 0; i < n; i++) {
for (int k = 0; k < 3; k++) {
verts[(start + i) * 3 + k][0] = vertices[index * 12 + i * 3 + k].x;
verts[(start + i) * 3 + k][1] = vertices[index * 12 + i * 3 + k].y;
verts[(start + i) * 3 + k][2] = vertices[index * 12 + i * 3 + k].z;
faces[start + i][k] = (start + i) * 3 + k;
}
}
}
}
std::vector<torch::Tensor> mcubes_cuda(torch::Tensor vol, float threshold) {
// Check input tensor
CHECK_CUDA(vol);
CHECK_CONTIGUOUS(vol);
CHECK_IS_FLOAT(vol);
CHECK_N_DIM(vol, 3);
// Transfer table data to device
torch::Tensor edgeTableTensor = torch::zeros({256},
torch::TensorOptions().dtype(at::kInt).device(at::kCPU));
{
auto acsr = edgeTableTensor.accessor<int, 1>();
for (int i = 0; i < 256; i++) {
acsr[i] = edgeTable[i];
}
}
torch::Tensor edgeTableTensorCuda = edgeTableTensor.to(vol.device());
torch::Tensor triTableTensor = torch::zeros({256, 16},
torch::TensorOptions().dtype(at::kInt).device(at::kCPU));
{
auto acsr = triTableTensor.accessor<int, 2>();
for (int i = 0; i < 256; i++) {
for (int j = 0; j < 16; j++) {
acsr[i][j] = triTable[i][j];
}
}
}
torch::Tensor triTableTensorCuda = triTableTensor.to(vol.device());
// Size parameters
const int64_t Nx = vol.size(2);
const int64_t Ny = vol.size(1);
const int64_t Nz = vol.size(0);
const uint32_t BLOCK_SIZE = 8;
const uint32_t gridx = (Nx + BLOCK_SIZE - 1) / BLOCK_SIZE;
const uint32_t gridy = (Ny + BLOCK_SIZE - 1) / BLOCK_SIZE;
const uint32_t gridz = (Nz + BLOCK_SIZE - 1) / BLOCK_SIZE;
const dim3 blocks = { gridx, gridy, gridz };
const dim3 threads = { BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE };
const int3 nGrids = make_int3(Nx, Ny, Nz);
const int dev_id = vol.device().index();
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Allocate vertex buffer
torch::Tensor vert_buffer = torch::zeros({12 * Nx * Ny * Nz * 3},
torch::TensorOptions().dtype(torch::kFloat32).device(vol.device()));
torch::Tensor ntris_in_cells = torch::zeros({12 * Nx * Ny * Nz},
torch::TensorOptions().dtype(torch::kInt32).device(vol.device()));
torch::Tensor offsets = torch::zeros({Nx * Ny * Nz},
torch::TensorOptions().dtype(torch::kInt32).device(vol.device()));
// Kernel call
hipLaunchKernelGGL(( mcubes_cuda_kernel), dim3(blocks), dim3(threads), 0, stream,
vol.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
(float3*)vert_buffer.data_ptr(),
(int*)ntris_in_cells.data_ptr(),
nGrids,
threshold,
edgeTableTensorCuda.packed_accessor32<int, 1, torch::RestrictPtrTraits>(),
triTableTensorCuda.packed_accessor32<int, 2, torch::RestrictPtrTraits>()
);
hipDeviceSynchronize();
// Compute number of triangles
prescan((int*)ntris_in_cells.data_ptr(),
(int*)offsets.data_ptr(),
Nx * Ny * Nz, dev_id, stream);
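// offsets now holds an exclusive prefix sum of the per-cell triangle counts, i.e. each cell's write position in the packed output.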
hipSetDevice(dev_id);
hipDeviceSynchronize();
hipSetDevice(dev_id);
const int ntri_last = ntris_in_cells[Nx * Ny * Nz - 1].cpu().item<int>();
const int offset_last = offsets[Nx * Ny * Nz - 1].cpu().item<int>();
// hipMemcpy(&ntri_last, ntris_in_cells + (Nx * Ny * Nz - 1), sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(&offset_last, offsets + (Nx * Ny * Nz - 1), sizeof(int), hipMemcpyDeviceToHost);
const int ntris = max(1, ntri_last + offset_last);
// Triangle list compaction
torch::Tensor verts = torch::zeros({ntris * 3, 3},
torch::TensorOptions().dtype(torch::kFloat32).device(vol.device()));
torch::Tensor faces = torch::zeros({ntris, 3},
torch::TensorOptions().dtype(torch::kInt32).device(vol.device()));
hipLaunchKernelGGL(( compaction), dim3(blocks), dim3(threads), 0, stream,
(float3*)vert_buffer.data_ptr(),
(int*)ntris_in_cells.data_ptr(),
(int*)offsets.data_ptr(),
nGrids,
verts.packed_accessor32<float, 2, torch::RestrictPtrTraits>(),
faces.packed_accessor32<int, 2, torch::RestrictPtrTraits>()
);
hipDeviceSynchronize();
CUDA_CHECK_ERRORS();
return { verts, faces };
}
| a985cc0782c6328b8c9a67565ce28c9341ba353b.cu | #include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include "macros.h"
#include "cuda_utils.h"
#include "pscan.h"
#include "helper_math.h"
static int edgeTable[256] = {
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
};
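// Each edgeTable entry is a 12-bit mask of the cube edges crossed by the isosurface for that corner configuration.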
static int triTable[256][16] = {
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
};
__device__ float3 vertexInterp(float isolevel, float3 p1, float3 p2, float valp1, float valp2) {
float mu;
float3 p;
if (abs(isolevel - valp1) < 0.0001f) {
return p1;
}
if (abs(isolevel - valp2) < 0.0001f) {
return p2;
}
if (abs(valp1 - valp2) < 0.0001f) {
return p1;
}
mu = (isolevel - valp1) / (valp2 - valp1);
p.x = p1.x + mu * (p2.x - p1.x);
p.y = p1.y + mu * (p2.y - p1.y);
p.z = p1.z + mu * (p2.z - p1.z);
return p;
}
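// Editor's note (not part of the original source): a quick worked example of
// the interpolation above. With isolevel = 0.5, valp1 = 0.2 at p1 = (0,0,0)
// and valp2 = 0.8 at p2 = (1,0,0), mu = (0.5 - 0.2) / (0.8 - 0.2) = 0.5, so
// the emitted vertex is the edge midpoint (0.5, 0, 0). The 0.0001f guards
// simply snap to an endpoint when a field value already equals the isolevel
// (or the edge is degenerate), avoiding division by a near-zero denominator.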
__global__ void mcubes_cuda_kernel(
const torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> vol,
float3 *vertices,
int *ntris_in_cells,
int3 nGrids,
float threshold,
const torch::PackedTensorAccessor32<int, 1, torch::RestrictPtrTraits> edgeTable,
const torch::PackedTensorAccessor32<int, 2, torch::RestrictPtrTraits> triTable) {
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
const int iz = blockIdx.z * blockDim.z + threadIdx.z;
if (ix >= nGrids.x - 1 || iy >= nGrids.y - 1 || iz >= nGrids.z - 1) {
return;
}
int cubeindex = 0;
int bits = 0;
float val[8];
float3 p[8];
int indexTable[8] {
0, 1, 4, 5, 3, 2, 7, 6
};
for (int k = 0; k <= 1; k++) {
for (int j = 0; j <= 1; j++) {
for (int i = 0; i <= 1; i++) {
const int jx = ix + i;
const int jy = iy + j;
const int jz = iz + k;
if (vol[jz][jy][jx] < threshold) {
cubeindex |= 0x01 << indexTable[bits];
}
val[indexTable[bits]] = vol[jz][jy][jx];
p[indexTable[bits]] = make_float3(jx, jy, jz);
bits += 1;
}
}
}
/* Find the vertices where the surface intersects the cube */
float3 vertlist[12];
if ((edgeTable[cubeindex] & 1) != 0) {
vertlist[0] = vertexInterp(threshold, p[0], p[1], val[0], val[1]);
}
if ((edgeTable[cubeindex] & 2) != 0) {
vertlist[1] = vertexInterp(threshold, p[1], p[2], val[1], val[2]);
}
if ((edgeTable[cubeindex] & 4) != 0) {
vertlist[2] = vertexInterp(threshold, p[2], p[3], val[2], val[3]);
}
if ((edgeTable[cubeindex] & 8) != 0) {
vertlist[3] = vertexInterp(threshold, p[3], p[0], val[3], val[0]);
}
if ((edgeTable[cubeindex] & 16) != 0) {
vertlist[4] = vertexInterp(threshold, p[4], p[5], val[4], val[5]);
}
if ((edgeTable[cubeindex] & 32) != 0) {
vertlist[5] = vertexInterp(threshold, p[5], p[6], val[5], val[6]);
}
if ((edgeTable[cubeindex] & 64) != 0) {
vertlist[6] = vertexInterp(threshold, p[6], p[7], val[6], val[7]);
}
if ((edgeTable[cubeindex] & 128) != 0) {
vertlist[7] = vertexInterp(threshold, p[7], p[4], val[7], val[4]);
}
if ((edgeTable[cubeindex] & 256) != 0) {
vertlist[8] = vertexInterp(threshold, p[0], p[4], val[0], val[4]);
}
if ((edgeTable[cubeindex] & 512) != 0) {
vertlist[9] = vertexInterp(threshold, p[1], p[5], val[1], val[5]);
}
if ((edgeTable[cubeindex] & 1024) != 0) {
vertlist[10] = vertexInterp(threshold, p[2], p[6], val[2], val[6]);
}
if ((edgeTable[cubeindex] & 2048) != 0) {
vertlist[11] = vertexInterp(threshold, p[3], p[7], val[3], val[7]);
}
const int id = (iz * nGrids.y + iy) * nGrids.x + ix;
for (int i = 0; i < 4; i++) {
if (triTable[cubeindex][i * 3 + 0] >= 0) {
float3 tri[3];
for (int k = 0; k < 3; k++) {
tri[k] = vertlist[triTable[cubeindex][i * 3 + k]];
}
for (int k = 0; k < 3; k++) {
vertices[id * 12 + i * 3 + k] = tri[k];
}
ntris_in_cells[id] += 1;
} else {
for (int k = 0; k < 3; k++) {
vertices[id * 12 + i * 3 + k] = make_float3(0.0, 0.0, 0.0);
}
}
}
}
__global__ void compaction(
float3 *vertices,
int *ntris,
int *offsets,
int3 nGrids,
torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> verts,
torch::PackedTensorAccessor32<int, 2, torch::RestrictPtrTraits> faces) {
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
const int iz = blockIdx.z * blockDim.z + threadIdx.z;
const int index = (iz * nGrids.y + iy) * nGrids.x + ix;
const int size = nGrids.x * nGrids.y * nGrids.z;
if (index < size) {
const int start = offsets[index];
const int n = ntris[index];
for (int i = 0; i < n; i++) {
for (int k = 0; k < 3; k++) {
verts[(start + i) * 3 + k][0] = vertices[index * 12 + i * 3 + k].x;
verts[(start + i) * 3 + k][1] = vertices[index * 12 + i * 3 + k].y;
verts[(start + i) * 3 + k][2] = vertices[index * 12 + i * 3 + k].z;
faces[start + i][k] = (start + i) * 3 + k;
}
}
}
}
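// Editor's note (not part of the original source): compaction assumes that
// `offsets` holds the exclusive prefix sum of `ntris` (computed by `prescan`
// in the host wrapper below). Small worked example: ntris = [2, 0, 1, 3]
// gives offsets = [0, 2, 2, 3]; cell 3 then writes its 3 triangles starting
// at slot 3, and the total triangle count is offsets[last] + ntris[last] = 6.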
std::vector<torch::Tensor> mcubes_cuda(torch::Tensor vol, float threshold) {
// Check input tensor
CHECK_CUDA(vol);
CHECK_CONTIGUOUS(vol);
CHECK_IS_FLOAT(vol);
CHECK_N_DIM(vol, 3);
// Transfer table data to device
torch::Tensor edgeTableTensor = torch::zeros({256},
torch::TensorOptions().dtype(at::kInt).device(at::kCPU));
{
auto acsr = edgeTableTensor.accessor<int, 1>();
for (int i = 0; i < 256; i++) {
acsr[i] = edgeTable[i];
}
}
torch::Tensor edgeTableTensorCuda = edgeTableTensor.to(vol.device());
torch::Tensor triTableTensor = torch::zeros({256, 16},
torch::TensorOptions().dtype(at::kInt).device(at::kCPU));
{
auto acsr = triTableTensor.accessor<int, 2>();
for (int i = 0; i < 256; i++) {
for (int j = 0; j < 16; j++) {
acsr[i][j] = triTable[i][j];
}
}
}
torch::Tensor triTableTensorCuda = triTableTensor.to(vol.device());
// Size parameters
const int64_t Nx = vol.size(2);
const int64_t Ny = vol.size(1);
const int64_t Nz = vol.size(0);
const uint32_t BLOCK_SIZE = 8;
const uint32_t gridx = (Nx + BLOCK_SIZE - 1) / BLOCK_SIZE;
const uint32_t gridy = (Ny + BLOCK_SIZE - 1) / BLOCK_SIZE;
const uint32_t gridz = (Nz + BLOCK_SIZE - 1) / BLOCK_SIZE;
const dim3 blocks = { gridx, gridy, gridz };
const dim3 threads = { BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE };
const int3 nGrids = make_int3(Nx, Ny, Nz);
const int dev_id = vol.device().index();
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Allocate vertex buffer
torch::Tensor vert_buffer = torch::zeros({12 * Nx * Ny * Nz * 3},
torch::TensorOptions().dtype(torch::kFloat32).device(vol.device()));
torch::Tensor ntris_in_cells = torch::zeros({12 * Nx * Ny * Nz},
torch::TensorOptions().dtype(torch::kInt32).device(vol.device()));
torch::Tensor offsets = torch::zeros({Nx * Ny * Nz},
torch::TensorOptions().dtype(torch::kInt32).device(vol.device()));
// Kernel call
mcubes_cuda_kernel<<<blocks, threads, 0, stream>>>(
vol.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
(float3*)vert_buffer.data_ptr(),
(int*)ntris_in_cells.data_ptr(),
nGrids,
threshold,
edgeTableTensorCuda.packed_accessor32<int, 1, torch::RestrictPtrTraits>(),
triTableTensorCuda.packed_accessor32<int, 2, torch::RestrictPtrTraits>()
);
cudaDeviceSynchronize();
// Compute number of triangles
prescan((int*)ntris_in_cells.data_ptr(),
(int*)offsets.data_ptr(),
Nx * Ny * Nz, dev_id, stream);
cudaSetDevice(dev_id);
cudaDeviceSynchronize();
cudaSetDevice(dev_id);
const int ntri_last = ntris_in_cells[Nx * Ny * Nz - 1].cpu().item<int>();
const int offset_last = offsets[Nx * Ny * Nz - 1].cpu().item<int>();
// cudaMemcpy(&ntri_last, ntris_in_cells + (Nx * Ny * Nz - 1), sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(&offset_last, offsets + (Nx * Ny * Nz - 1), sizeof(int), cudaMemcpyDeviceToHost);
const int ntris = max(1, ntri_last + offset_last);
// Triangle list compaction
torch::Tensor verts = torch::zeros({ntris * 3, 3},
torch::TensorOptions().dtype(torch::kFloat32).device(vol.device()));
torch::Tensor faces = torch::zeros({ntris, 3},
torch::TensorOptions().dtype(torch::kInt32).device(vol.device()));
compaction<<<blocks, threads, 0, stream>>>(
(float3*)vert_buffer.data_ptr(),
(int*)ntris_in_cells.data_ptr(),
(int*)offsets.data_ptr(),
nGrids,
verts.packed_accessor32<float, 2, torch::RestrictPtrTraits>(),
faces.packed_accessor32<int, 2, torch::RestrictPtrTraits>()
);
cudaDeviceSynchronize();
CUDA_CHECK_ERRORS();
return { verts, faces };
}
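// Editor's sketch (not part of the original file): a minimal host-side call of
// mcubes_cuda() as defined above. The volume shape and the use of torch::randn
// are illustrative assumptions, not taken from the original project.
std::vector<torch::Tensor> mcubes_cuda_example() {
    // Random float32 volume on the GPU; extract the 0.0f isosurface.
    torch::Tensor vol = torch::randn({64, 64, 64},
        torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA));
    // Returns { verts (ntris*3 x 3, float32), faces (ntris x 3, int32) }.
    return mcubes_cuda(vol, 0.0f);
}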
|
cd7daa02d0826ba6dfd10545e3fc4950bfc6f60a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgesellcmv.cu, normal z -> c, Mon Jun 25 18:24:24 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
__global__ void
cgesellcmv_kernel(
int num_rows,
int num_cols,
int blocksize,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
magmaFloatComplex val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row (=1)
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in SELLC/P
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLC/P
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgesellcmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
magma_int_t threads = blocksize;
hipLaunchKernelGGL(( cgesellcmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, blocksize, alpha,
dval, dcolind, drowptr, dx, beta, dy );
return MAGMA_SUCCESS;
}
| cd7daa02d0826ba6dfd10545e3fc4950bfc6f60a.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgesellcmv.cu, normal z -> c, Mon Jun 25 18:24:24 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
__global__ void
cgesellcmv_kernel(
int num_rows,
int num_cols,
int blocksize,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
magmaFloatComplex val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row (=1)
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in SELLC/P
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLC/P
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgesellcmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
magma_int_t threads = blocksize;
cgesellcmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, blocksize, alpha,
dval, dcolind, drowptr, dx, beta, dy );
return MAGMA_SUCCESS;
}
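// Editor's note (not part of the MAGMA source): for SELLC/SELLP the `slices`
// argument is normally ceil(m / blocksize), e.g.
//
//     magma_int_t slices = (m + blocksize - 1) / blocksize;
//
// Because the grid above is (slices, 1, 1), the launch is limited to 65535
// slices (roughly 2M rows at blocksize 32), which is what the comment in the
// wrapper already warns about.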
|
0f7980187dd70e1dd9f523d5bb89e361ac7be578.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void PictureKernell(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114;
}
} | 0f7980187dd70e1dd9f523d5bb89e361ac7be578.cu | #include "includes.h"
__global__ void PictureKernell(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114;
}
} |
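// Editor's sketch (not part of the original file): one possible host-side
// launch of PictureKernell. RED/GREEN/BLUE are assumed to be channel offsets
// defined in "includes.h"; the buffer names and the 32x32 block shape are
// illustrative choices, not taken from the original project.
void runPictureKernell(const unsigned char *h_rgb, unsigned char *h_gray, int width, int height) {
    unsigned char *d_rgb = NULL, *d_gray = NULL;
    const size_t rgbBytes = (size_t)width * height * 3;   // interleaved RGB input
    const size_t grayBytes = (size_t)width * height;      // single-channel output
    cudaMalloc(&d_rgb, rgbBytes);
    cudaMalloc(&d_gray, grayBytes);
    cudaMemcpy(d_rgb, h_rgb, rgbBytes, cudaMemcpyHostToDevice);
    dim3 block(32, 32);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    PictureKernell<<<grid, block>>>(d_rgb, width, height, d_gray);
    cudaMemcpy(h_gray, d_gray, grayBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_rgb);
    cudaFree(d_gray);
}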
dd3982fd9002def644a305b3c3436069da9f10dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "histogram_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
unsigned int *bins = NULL;
hipMalloc(&bins, XSIZE*YSIZE);
unsigned int num_elements = 1;
unsigned int num_bins = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((histogram_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, bins, num_elements, num_bins);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((histogram_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, bins, num_elements, num_bins);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((histogram_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, bins, num_elements, num_bins);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | dd3982fd9002def644a305b3c3436069da9f10dd.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "histogram_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
unsigned int *bins = NULL;
cudaMalloc(&bins, XSIZE*YSIZE);
unsigned int num_elements = 1;
unsigned int num_bins = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
histogram_kernel<<<gridBlock,threadBlock>>>(input,bins,num_elements,num_bins);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
histogram_kernel<<<gridBlock,threadBlock>>>(input,bins,num_elements,num_bins);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
histogram_kernel<<<gridBlock,threadBlock>>>(input,bins,num_elements,num_bins);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
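// Editor's sketch (not part of the original harness): the steady_clock interval
// above is read without a device synchronize after the timed loop, so part of
// the launched work may still be in flight when the clock is sampled. A
// CUDA-event based variant, written as a separate helper with illustrative
// names and an assumed iteration count:
static float timeHistogramKernelMs(dim3 gridBlock, dim3 threadBlock,
                                   unsigned int *input, unsigned int *bins,
                                   unsigned int num_elements, unsigned int num_bins,
                                   int iters = 1000) {
    cudaEvent_t evStart, evStop;
    cudaEventCreate(&evStart);
    cudaEventCreate(&evStop);
    cudaEventRecord(evStart);
    for (int it = 0; it < iters; ++it) {
        histogram_kernel<<<gridBlock, threadBlock>>>(input, bins, num_elements, num_bins);
    }
    cudaEventRecord(evStop);
    cudaEventSynchronize(evStop);   // wait until all timed launches have finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, evStart, evStop);
    cudaEventDestroy(evStart);
    cudaEventDestroy(evStop);
    return ms;
}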
c57c87d76756a6338532f092d62bb2763719bc56.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include "hip/hip_runtime.h"
__global__
void GPUFunction()
{
printf("hello from the Gpu.\n");
}
int main()
{
hipLaunchKernelGGL(( GPUFunction), dim3(1), dim3(1), 0, 0, );
hipDeviceSynchronize();
return EXIT_SUCCESS;
}
| c57c87d76756a6338532f092d62bb2763719bc56.cu | #include <cstdio>
#include "cuda.h"
__global__
void GPUFunction()
{
printf("hello from the Gpu.\n");
}
int main()
{
GPUFunction<<<1, 1>>>();
cudaDeviceSynchronize();
return EXIT_SUCCESS;
}
|
efae5bdd05de1a4c3efbe0421ef2e3b8b3a8a2b9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `sum`
#include <cudf/detail/reduction_functions.hpp>
#include "simple_hip.cuh"
std::unique_ptr<cudf::scalar> cudf::reduction::sum(column_view const& col,
cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
using reducer = cudf::reduction::simple::element_type_dispatcher<cudf::reduction::op::sum>;
return cudf::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
| efae5bdd05de1a4c3efbe0421ef2e3b8b3a8a2b9.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `sum`
#include <cudf/detail/reduction_functions.hpp>
#include "simple.cuh"
std::unique_ptr<cudf::scalar> cudf::reduction::sum(column_view const& col,
cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
using reducer = cudf::reduction::simple::element_type_dispatcher<cudf::reduction::op::sum>;
return cudf::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
|
bfdcb257c434bc9e2388a84c8424424084858a5c.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void vectorAddGPU(float *a, float *b, float *c, int N) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N) {
c[idx] = a[idx] + b[idx];
}
}
void unified_sample (int size = 1048576) {
int n = size;
int nBytes = n * sizeof(float);
float *a, *b, *c;
hipEvent_t uniStart, uniStop;
hipEventCreate(&uniStart);
hipEventCreate(&uniStop);
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
// printf("UNI: allocating memory\n");
hipMallocManaged(&a, nBytes);
hipMallocManaged(&b, nBytes);
hipMallocManaged(&c, nBytes);
for (int i = 0; i < n; i++) {
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
hipEventRecord(uniStart);
hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, a, b, c, n);
hipEventRecord(uniStop);
hipDeviceSynchronize();
float ms = 0;
hipEventElapsedTime(&ms, uniStart, uniStop);
printf("UNI: Memalloc(unified memory) + Kernel time is: %f\n", ms);
hipDeviceSynchronize();
}
void pinned_sample (int size = 1048576) {
int n = size;
size_t nBytes = n * sizeof(float);
float *a, *b, *c;
float *d_a, *d_b, *d_c;
// float errNorm, refNorm, ref, diff;
hipEvent_t pinStart, pinStop;
hipEventCreate(&pinStart);
hipEventCreate(&pinStop);
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
hipHostMalloc(&a, nBytes);
hipHostMalloc(&b, nBytes);
hipHostMalloc(&c, nBytes);
hipMalloc(&d_a, nBytes);
hipMalloc(&d_b, nBytes);
hipMalloc(&d_c, nBytes);
for (int i = 0; i < n; i++) {
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
hipEventRecord(pinStart);
hipMemcpy(d_a, a, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, nBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, n);
hipEventRecord(pinStop);
hipDeviceSynchronize();
float ms = 0;
hipEventElapsedTime(&ms, pinStart, pinStop);
printf("PIN: Memcpy + Kernel time is: %f\n", ms);
hipDeviceSynchronize();
}
void usual_sample (int size = 1048576) {
int n = size;
int nBytes = n*sizeof(float);
float *a, *b; // host data
float *c; // results
a = (float*)malloc(nBytes);
b = (float*)malloc(nBytes);
c = (float*)malloc(nBytes);
float *a_d,*b_d,*c_d;
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
for(int i = 0; i < n; i++) {
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
printf("Allocating device memory on host..\n");
hipEvent_t start, stop, malloc_start, malloc_stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&malloc_start);
hipEventCreate(&malloc_stop);
hipMalloc((void **)&a_d,n*sizeof(float));
hipMalloc((void **)&b_d,n*sizeof(float));
hipMalloc((void **)&c_d,n*sizeof(float));
printf("Copying to device..\n");
hipEventRecord(start);
hipMemcpy(a_d, a, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(b_d, b, n*sizeof(float), hipMemcpyHostToDevice);
printf("Doing GPU Vector add\n");
hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, n);
hipEventRecord(stop);
hipDeviceSynchronize();
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("USUAL Memcpy + Kernel: %f ms\n", milliseconds);
hipDeviceSynchronize();
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
}
int main(int argc, char **argv) {
assert(argc==2);
usual_sample(atoi(argv[1]));
pinned_sample(atoi(argv[1]));
unified_sample(atoi(argv[1]));
return 0;
}
| bfdcb257c434bc9e2388a84c8424424084858a5c.cu | // System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void vectorAddGPU(float *a, float *b, float *c, int N) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N) {
c[idx] = a[idx] + b[idx];
}
}
void unified_sample (int size = 1048576) {
int n = size;
int nBytes = n * sizeof(float);
float *a, *b, *c;
cudaEvent_t uniStart, uniStop;
cudaEventCreate(&uniStart);
cudaEventCreate(&uniStop);
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
// printf("UNI: allocating memory\n");
cudaMallocManaged(&a, nBytes);
cudaMallocManaged(&b, nBytes);
cudaMallocManaged(&c, nBytes);
for (int i = 0; i < n; i++) {
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
cudaEventRecord(uniStart);
vectorAddGPU<<<grid, block>>>(a, b, c, n);
cudaEventRecord(uniStop);
cudaDeviceSynchronize();
float ms = 0;
cudaEventElapsedTime(&ms, uniStart, uniStop);
printf("UNI: Memalloc(unified memory) + Kernel time is: %f\n", ms);
cudaThreadSynchronize();
}
void pinned_sample (int size = 1048576) {
int n = size;
size_t nBytes = n * sizeof(float);
float *a, *b, *c;
float *d_a, *d_b, *d_c;
// float errNorm, refNorm, ref, diff;
cudaEvent_t pinStart, pinStop;
cudaEventCreate(&pinStart);
cudaEventCreate(&pinStop);
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
cudaMallocHost(&a, nBytes);
cudaMallocHost(&b, nBytes);
cudaMallocHost(&c, nBytes);
cudaMalloc(&d_a, nBytes);
cudaMalloc(&d_b, nBytes);
cudaMalloc(&d_c, nBytes);
for (int i = 0; i < n; i++) {
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
cudaEventRecord(pinStart);
cudaMemcpy(d_a, a, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, nBytes, cudaMemcpyHostToDevice);
vectorAddGPU<<<grid, block>>>(d_a, d_b, d_c, n);
cudaEventRecord(pinStop);
cudaDeviceSynchronize();
float ms = 0;
cudaEventElapsedTime(&ms, pinStart, pinStop);
printf("PIN: Memcpy + Kernel time is: %f\n", ms);
cudaThreadSynchronize();
}
void usual_sample (int size = 1048576) {
int n = size;
int nBytes = n*sizeof(float);
float *a, *b; // host data
float *c; // results
a = (float*)malloc(nBytes);
b = (float*)malloc(nBytes);
c = (float*)malloc(nBytes);
float *a_d,*b_d,*c_d;
dim3 block(256);
dim3 grid((unsigned int)ceil(n/(float)block.x));
for(int i = 0; i < n; i++) {
a[i] = rand() / (float)RAND_MAX;
b[i] = rand() / (float)RAND_MAX;
c[i] = 0;
}
printf("Allocating device memory on host..\n");
cudaEvent_t start, stop, malloc_start, malloc_stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&malloc_start);
cudaEventCreate(&malloc_stop);
cudaMalloc((void **)&a_d,n*sizeof(float));
cudaMalloc((void **)&b_d,n*sizeof(float));
cudaMalloc((void **)&c_d,n*sizeof(float));
printf("Copying to device..\n");
cudaEventRecord(start);
cudaMemcpy(a_d, a, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, n*sizeof(float), cudaMemcpyHostToDevice);
printf("Doing GPU Vector add\n");
vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n);
cudaEventRecord(stop);
cudaDeviceSynchronize();
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("USUAL Memcpy + Kernel: %f ms\n", milliseconds);
cudaThreadSynchronize();
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
}
int main(int argc, char **argv) {
assert(argc==2);
usual_sample(atoi(argv[1]));
pinned_sample(atoi(argv[1]));
unified_sample(atoi(argv[1]));
return 0;
}
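// Editor's note (not part of the original sample): the unified-memory timing in
// unified_sample() includes on-demand page migration triggered by the kernel's
// first touch of a, b and c. On devices that support managed-memory prefetch,
// one way to make it more comparable to the explicit-copy paths is to prefetch
// before the timed region, e.g. (device id and default stream are illustrative):
//
//     int dev = 0;
//     cudaMemPrefetchAsync(a, nBytes, dev, 0);
//     cudaMemPrefetchAsync(b, nBytes, dev, 0);
//     cudaMemPrefetchAsync(c, nBytes, dev, 0);
//     cudaDeviceSynchronize();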
|
36e2ee34b80085d7c247b6026f5c75f1ee66e250.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "../utils/SyncedMemory.h"
#include "lab2.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
Lab2VideoGenerator g;
Lab2VideoInfo i;
g.get_info(i);
if (i.w == 0 or i.h == 0 or i.n_frame == 0 or i.fps_n == 0 or i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 or i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
unsigned FRAME_SIZE = i.w*i.h*3/2;
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
| 36e2ee34b80085d7c247b6026f5c75f1ee66e250.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "../utils/SyncedMemory.h"
#include "lab2.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
Lab2VideoGenerator g;
Lab2VideoInfo i;
g.get_info(i);
if (i.w == 0 or i.h == 0 or i.n_frame == 0 or i.fps_n == 0 or i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 or i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
unsigned FRAME_SIZE = i.w*i.h*3/2;
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
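// Editor's note (not part of the original file): FRAME_SIZE = w*h*3/2 is one
// YUV 4:2:0 frame: a full-resolution Y plane of w*h bytes plus
// quarter-resolution U and V planes of w*h/4 bytes each. At 640x480 that is
// 307200 + 76800 + 76800 = 460800 bytes per frame, which is what the fwrite
// above emits after each "FRAME" header.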
|
f27d5e25825b9511f96de187cb82ad163cce5fce.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
#include <hip/hip_runtime_api.h>
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 10000000;
const int NUM_ITERATIONS = 10;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
//reading speed for searching
int speed = atoi(argv[1]);
printf("running with speed %d", speed);
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
printf("...allocating GPU memory for options.\n");
checkCudaErrors(hipMalloc((void **)&d_CallResult, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_PutResult, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_StockPrice, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_OptionStrike, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_OptionYears, OPT_SZ));
printf("...generating input data in CPU mem.\n");
int iteration = 0;
double accumulate_runtime = 0.0;
double accumulate_option = 0.0;
double runtimes[105];
double error[105];
int lower_bound = 0;
int upper_bound = 100;
int mid = 50;
double error_threshold = 1.0E-07;
bool calibration = true;
for (iteration = 0 ; iteration <30; iteration ++){
srand(iteration);
//Generate options set
for (i = 0; i < OPT_N; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
//binary search
mid = (upper_bound + lower_bound)/2;
// printf(" low %d up %d mid %d \n", lower_bound, upper_bound, (upper_bound + lower_bound)/2 );
printf("\n mid calibration %d \n", mid);
//
// printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(hipMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, hipMemcpyHostToDevice));
// printf("Data init done.\n\n");
runtimes[iteration] = 0;
//if (iteration< 7) calibration = true;
if(calibration){ //calibration run
// printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (i = 0; i < NUM_ITERATIONS; i++)
{
hipLaunchKernelGGL(( BlackScholesGPU), dim3(DIV_UP((OPT_N/2), 128)), dim3(128) /*480, 128*/, 0, 0,
(float2 *)d_CallResult,
(float2 *)d_PutResult,
(float2 *)d_StockPrice,
(float2 *)d_OptionStrike,
(float2 *)d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N,
mid
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
runtimes[iteration] = gpuTime;
accumulate_runtime += gpuTime;
accumulate_option += 2 * OPT_N;
//Both call and put is calculated
// printf("Options count : %i \n", 2 * OPT_N);
// printf("BlackScholesGPU() time : %f msec\n", gpuTime);
// printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
// printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
// (((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
// printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, hipMemcpyDeviceToHost));
// printf("Checking the results...\n");
// printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
// printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("err error_threshold: %E\n", error_threshold);
if (L1norm < error_threshold){
lower_bound = mid;
} else {
upper_bound = mid;
}
if( upper_bound - lower_bound <= 2)
calibration = false;
}
// printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
int running_speed = mid;
if (calibration)
running_speed = 0;
for (i = 0; i < NUM_ITERATIONS; i++)
{
hipLaunchKernelGGL(( BlackScholesGPU), dim3(DIV_UP((OPT_N/2), 128)), dim3(128) /*480, 128*/, 0, 0,
(float2 *)d_CallResult,
(float2 *)d_PutResult,
(float2 *)d_StockPrice,
(float2 *)d_OptionStrike,
(float2 *)d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N,
running_speed
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
//Both call and put is calculated
// printf("Options count : %i \n", 2 * OPT_N);
// printf("BlackScholesGPU() time : %f msec\n", gpuTime);
// printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
// printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
// (((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
// printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, hipMemcpyDeviceToHost));
// printf("Checking the results...\n");
// printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
// printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
//repeat with appropriate token for searching script
printf("tuning_error=%E\n", L1norm);
// printf("Max absolute error: %E\n\n", max_delta);
runtimes[iteration] += gpuTime;
error[iteration] = L1norm;
accumulate_runtime += gpuTime;
accumulate_option += 2 * OPT_N;
}
printf("\n gputime \n");
for (int i =0; i<105 ; i++){
printf("%f, ", runtimes[i]);
}
printf("\n error \n");
for (int i =0; i<105 ; i++){
printf("%.4E, ", error[i]);
}
printf("\n accumulate runtime %f , accumulate_opt %E \n", accumulate_runtime, accumulate_option);
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(hipFree(d_OptionYears));
checkCudaErrors(hipFree(d_OptionStrike));
checkCudaErrors(hipFree(d_StockPrice));
checkCudaErrors(hipFree(d_PutResult));
checkCudaErrors(hipFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
printf("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
// Calling hipProfilerStop causes all profile data to be
// flushed before the application exits
checkCudaErrors(hipProfilerStop());
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n");
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| f27d5e25825b9511f96de187cb82ad163cce5fce.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
#include <cuda_profiler_api.h>
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 10000000;
const int NUM_ITERATIONS = 10;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
//reading speed for searching
int speed = atoi(argv[1]);
printf("running with speed %d", speed);
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
printf("...allocating GPU memory for options.\n");
checkCudaErrors(cudaMalloc((void **)&d_CallResult, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_PutResult, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_StockPrice, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_OptionStrike, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_OptionYears, OPT_SZ));
printf("...generating input data in CPU mem.\n");
int iteration = 0;
double accumulate_runtime = 0.0;
double accumulate_option = 0.0;
double runtimes[105];
double error[105];
int lower_bound = 0;
int upper_bound = 100;
int mid = 50;
double error_threshold = 1.0E-07;
bool calibration = true;
for (iteration = 0 ; iteration <30; iteration ++){
srand(iteration);
//Generate options set
for (i = 0; i < OPT_N; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
//binary search
mid = (upper_bound + lower_bound)/2;
// printf(" low %d up %d mid %d \n", lower_bound, upper_bound, (upper_bound + lower_bound)/2 );
printf("\n mid calibration %d \n", mid);
//
// printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice));
// printf("Data init done.\n\n");
runtimes[iteration] = 0;
//if (iteration< 7) calibration = true;
if(calibration){ //calibration run
// printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (i = 0; i < NUM_ITERATIONS; i++)
{
BlackScholesGPU<<<DIV_UP((OPT_N/2), 128), 128/*480, 128*/>>>(
(float2 *)d_CallResult,
(float2 *)d_PutResult,
(float2 *)d_StockPrice,
(float2 *)d_OptionStrike,
(float2 *)d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N,
mid
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
runtimes[iteration] = gpuTime;
accumulate_runtime += gpuTime;
accumulate_option += 2 * OPT_N;
//Both call and put is calculated
// printf("Options count : %i \n", 2 * OPT_N);
// printf("BlackScholesGPU() time : %f msec\n", gpuTime);
// printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
// printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
// (((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
// printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost));
// printf("Checking the results...\n");
// printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
// printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("err error_threshold: %E\n", error_threshold);
if (L1norm < error_threshold){
lower_bound = mid;
} else {
upper_bound = mid;
}
if( upper_bound - lower_bound <= 2)
calibration = false;
}
// printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
int running_speed = mid;
if (calibration)
running_speed = 0;
for (i = 0; i < NUM_ITERATIONS; i++)
{
BlackScholesGPU<<<DIV_UP((OPT_N/2), 128), 128/*480, 128*/>>>(
(float2 *)d_CallResult,
(float2 *)d_PutResult,
(float2 *)d_StockPrice,
(float2 *)d_OptionStrike,
(float2 *)d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N,
running_speed
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
//Both call and put is calculated
// printf("Options count : %i \n", 2 * OPT_N);
// printf("BlackScholesGPU() time : %f msec\n", gpuTime);
// printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
// printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
// (((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
// printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost));
// printf("Checking the results...\n");
// printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
// printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
//repeat with appropriate token for searching script
printf("tuning_error=%E\n", L1norm);
// printf("Max absolute error: %E\n\n", max_delta);
runtimes[iteration] += gpuTime;
error[iteration] = L1norm;
accumulate_runtime += gpuTime;
accumulate_option += 2 * OPT_N;
}
printf("\n gputime \n");
for (int i =0; i<105 ; i++){
printf("%f, ", runtimes[i]);
}
printf("\n error \n");
for (int i =0; i<105 ; i++){
printf("%.4E, ", error[i]);
}
printf("\n accumulate runtime %f , accumulate_opt %E \n", accumulate_runtime, accumulate_option);
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(cudaFree(d_OptionYears));
checkCudaErrors(cudaFree(d_OptionStrike));
checkCudaErrors(cudaFree(d_StockPrice));
checkCudaErrors(cudaFree(d_PutResult));
checkCudaErrors(cudaFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
printf("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
// Calling cudaProfilerStop causes all profile data to be
// flushed before the application exits
checkCudaErrors(cudaProfilerStop());
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n");
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
0a55aef5a2af8702965b66a9860d09d9f0b2468b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CalculateDistanceAllPoints.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *data_x_d = NULL;
hipMalloc(&data_x_d, XSIZE*YSIZE*sizeof(double));
double *data_y_d = NULL;
hipMalloc(&data_y_d, XSIZE*YSIZE*sizeof(double));
double *data_z_d = NULL;
hipMalloc(&data_z_d, XSIZE*YSIZE*sizeof(double));
double *transformed_data_x_d = NULL;
hipMalloc(&transformed_data_x_d, XSIZE*YSIZE*sizeof(double));
double *transformed_data_y_d = NULL;
hipMalloc(&transformed_data_y_d, XSIZE*YSIZE*sizeof(double));
double *transformed_data_z_d = NULL;
hipMalloc(&transformed_data_z_d, XSIZE*YSIZE*sizeof(double));
int *index_d = NULL;
hipMalloc(&index_d, XSIZE*YSIZE*sizeof(int));
double *distance_d = NULL;
hipMalloc(&distance_d, XSIZE*YSIZE*sizeof(double));
int size_data = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
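// Round the launch size up to the next multiple of the block dimensions so the
// grid fully covers the XSIZE x YSIZE problem.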
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((CalculateDistanceAllPoints), dim3(gridBlock), dim3(threadBlock), 0, 0, data_x_d,data_y_d,data_z_d,transformed_data_x_d,transformed_data_y_d,transformed_data_z_d,index_d,distance_d,size_data);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((CalculateDistanceAllPoints), dim3(gridBlock), dim3(threadBlock), 0, 0, data_x_d,data_y_d,data_z_d,transformed_data_x_d,transformed_data_y_d,transformed_data_z_d,index_d,distance_d,size_data);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((CalculateDistanceAllPoints), dim3(gridBlock), dim3(threadBlock), 0, 0, data_x_d,data_y_d,data_z_d,transformed_data_x_d,transformed_data_y_d,transformed_data_z_d,index_d,distance_d,size_data);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0a55aef5a2af8702965b66a9860d09d9f0b2468b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CalculateDistanceAllPoints.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *data_x_d = NULL;
cudaMalloc(&data_x_d, XSIZE*YSIZE*sizeof(double));
double *data_y_d = NULL;
cudaMalloc(&data_y_d, XSIZE*YSIZE*sizeof(double));
double *data_z_d = NULL;
cudaMalloc(&data_z_d, XSIZE*YSIZE*sizeof(double));
double *transformed_data_x_d = NULL;
cudaMalloc(&transformed_data_x_d, XSIZE*YSIZE*sizeof(double));
double *transformed_data_y_d = NULL;
cudaMalloc(&transformed_data_y_d, XSIZE*YSIZE*sizeof(double));
double *transformed_data_z_d = NULL;
cudaMalloc(&transformed_data_z_d, XSIZE*YSIZE*sizeof(double));
int *index_d = NULL;
cudaMalloc(&index_d, XSIZE*YSIZE*sizeof(int));
double *distance_d = NULL;
cudaMalloc(&distance_d, XSIZE*YSIZE*sizeof(double));
int size_data = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
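// Round the launch size up to the next multiple of the block dimensions so the
// grid fully covers the XSIZE x YSIZE problem.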
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
CalculateDistanceAllPoints<<<gridBlock,threadBlock>>>(data_x_d,data_y_d,data_z_d,transformed_data_x_d,transformed_data_y_d,transformed_data_z_d,index_d,distance_d,size_data);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
CalculateDistanceAllPoints<<<gridBlock,threadBlock>>>(data_x_d,data_y_d,data_z_d,transformed_data_x_d,transformed_data_y_d,transformed_data_z_d,index_d,distance_d,size_data);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
CalculateDistanceAllPoints<<<gridBlock,threadBlock>>>(data_x_d,data_y_d,data_z_d,transformed_data_x_d,transformed_data_y_d,transformed_data_z_d,index_d,distance_d,size_data);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
133e545cccb3b8c209dea47ea829ba0522e148f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <ctime>
#include "sort.h"
#include "utils.h"
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef NUM_STREAMS
#define NUM_STREAMS 1
#endif
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
}
void print(uint* host_data, uint n, uint m) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
for (uint j = 0; j < m; j++) {
std::cout << host_data[i * m + j] << " ";
}
std::cout << "\n";
}
}
////////////////////////////////////////////////////////////////////////////////
// Verify the results.
////////////////////////////////////////////////////////////////////////////////
void check_results(int n, int m, unsigned int *results_h)
{
for (int i = 0 ; i < n ; ++i) {
for (uint j = 1; j < m; j++) {
if (results_h[i*m +j -1] > results_h[i*m +j])
{
std::cout << "Invalid item[" << j-1 << "]: " << results_h[i*m +j -1] << " greater than " << results_h[i*m +j] << std::endl;
exit(EXIT_FAILURE);
}
}
}
std::cout << "OK" << std::endl;
}
int main(int argc, char** argv) {
uint num_of_segments;
uint num_of_elements;
scanf("%d", &num_of_segments);
	uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg = (uint *) malloc(mem_size_seg);
for (int i = 0; i < num_of_segments+1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
uint mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
for (int i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
uint *d_out, *d_vec;
hipStream_t streams[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++) {
hipStreamCreate(&streams[i]);
}
int nstreams = NUM_STREAMS;
if (NUM_STREAMS > num_of_segments)
nstreams = num_of_segments;
cudaTest(hipMalloc((void **) &d_vec, mem_size_vec));
cudaTest(hipMalloc((void **) &d_out, mem_size_vec));
for (uint j = 0; j < EXECUTIONS; j++) {
// copy host memory to device
//cudaTest(hipMemcpy(d_out, h_seg, mem_size_seg, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice));
hipEventRecord(start);
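		// Issue one radix_sort per segment, rotating through the streams so up to
		// nstreams equal-sized segments can be sorted concurrently.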
for(int i = 0; i < num_of_segments; i+=nstreams) {
for (int s = 0; s < nstreams; s++) {
radix_sort(d_out + h_seg[i+s], d_vec + h_seg[i+s], num_of_elements/num_of_segments, streams[s]);
}
}
hipEventRecord(stop);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
hipDeviceSynchronize();
}
cudaTest(hipMemcpy(h_vec, d_out, mem_size_vec, hipMemcpyDeviceToHost));
hipFree(d_vec);
hipFree(d_out);
if (ELAPSED_TIME != 1) {
//print(h_vec, num_of_segments, num_of_elements/num_of_segments);
check_results(num_of_segments, num_of_elements/num_of_segments, h_vec);
}
free(h_seg);
free(h_vec);
return 0;
}
| 133e545cccb3b8c209dea47ea829ba0522e148f5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <ctime>
#include "sort.h"
#include "utils.h"
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef NUM_STREAMS
#define NUM_STREAMS 1
#endif
void cudaTest(cudaError_t error) {
if (error != cudaSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
}
void print(uint* host_data, uint n, uint m) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
for (uint j = 0; j < m; j++) {
std::cout << host_data[i * m + j] << " ";
}
std::cout << "\n";
}
}
////////////////////////////////////////////////////////////////////////////////
// Verify the results.
////////////////////////////////////////////////////////////////////////////////
void check_results(int n, int m, unsigned int *results_h)
{
for (int i = 0 ; i < n ; ++i) {
for (uint j = 1; j < m; j++) {
if (results_h[i*m +j -1] > results_h[i*m +j])
{
std::cout << "Invalid item[" << j-1 << "]: " << results_h[i*m +j -1] << " greater than " << results_h[i*m +j] << std::endl;
exit(EXIT_FAILURE);
}
}
}
std::cout << "OK" << std::endl;
}
int main(int argc, char** argv) {
uint num_of_segments;
uint num_of_elements;
scanf("%d", &num_of_segments);
	uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg = (uint *) malloc(mem_size_seg);
for (int i = 0; i < num_of_segments+1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
uint mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
for (int i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
uint *d_out, *d_vec;
cudaStream_t streams[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++) {
cudaStreamCreate(&streams[i]);
}
int nstreams = NUM_STREAMS;
if (NUM_STREAMS > num_of_segments)
nstreams = num_of_segments;
cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_out, mem_size_vec));
for (uint j = 0; j < EXECUTIONS; j++) {
// copy host memory to device
//cudaTest(cudaMemcpy(d_out, h_seg, mem_size_seg, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
cudaEventRecord(start);
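		// Issue one radix_sort per segment, rotating through the streams so up to
		// nstreams equal-sized segments can be sorted concurrently.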
for(int i = 0; i < num_of_segments; i+=nstreams) {
for (int s = 0; s < nstreams; s++) {
radix_sort(d_out + h_seg[i+s], d_vec + h_seg[i+s], num_of_elements/num_of_segments, streams[s]);
}
}
cudaEventRecord(stop);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
cudaDeviceSynchronize();
}
cudaTest(cudaMemcpy(h_vec, d_out, mem_size_vec, cudaMemcpyDeviceToHost));
cudaFree(d_vec);
cudaFree(d_out);
if (ELAPSED_TIME != 1) {
//print(h_vec, num_of_segments, num_of_elements/num_of_segments);
check_results(num_of_segments, num_of_elements/num_of_segments, h_vec);
}
free(h_seg);
free(h_vec);
return 0;
}
|
3edaeaa6269317c30c60c66a6a0cfb15416861f3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
 * This sample demonstrates how to use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
 * an output PGM image (image_filename_out). This CUDA kernel convolves the
 * input image with a filter mask, fetching neighboring pixels through a texture.
*/
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <hip/hip_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
// Define the files that are to be save and the reference images for validation
const char *imageFilename = "thanos.pgm";
const char *refFilename = "ref_rotated.pgm";
const char *sampleName = "simpleTexture";
////////////////////////////////////////////////////////////////////////////////
// Texture reference for 2D float texture
texture<float, 2, hipReadModeElementType> tex;
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void transformKernel(float* dData, int width, int height, float* mask, int masksize)
{
// calculate normalized texture coordinates
unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
int S = (masksize-1)/2;
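    // S is the filter radius: each output pixel accumulates a masksize x masksize
    // neighborhood fetched through the texture (wrap addressing handles the borders,
    // and the 0.5f offset samples texel centers).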
float sum = 0.0;
int pixPos = row*width + col;
dData[pixPos] = 0.0;
for(int maskrow = -S; maskrow <= S; maskrow++){
for(int maskcol = -S; maskcol <= S; maskcol++){
int maskP = (maskrow+S)*masksize + (maskcol+S);
sum += mask[maskP] * tex2D(tex, (col+maskcol+0.5f)/(float)width, (row+maskrow+0.5f)/(float)height);
}
}
dData[pixPos] = sum;
if (dData[pixPos] < 0){
dData[pixPos] = 0;
}
else if(dData[pixPos] > 1){
dData[pixPos] = 1;
}
}
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", sampleName);
runTest(argc, argv);
hipDeviceReset();
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
int devID = findCudaDevice(argc, (const char **) argv);
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
// int masksize = 3;
// int masksize = 5;
int masksize = 7;
float* dFilter = NULL;
int fsize = masksize*masksize*sizeof(float);
checkCudaErrors(hipMalloc((void** )&dFilter,fsize));
float hEdge3[] = {-1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0};
float hSharpen3[] = {-1.0, -1.0, -1.0, -1.0, 9, -1.0, -1.0, -1.0, -1.0};
float hAverage3[] = {0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111};
//
float hSharpen5[] = {-1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0, 25, -1.0, -1.0,
-1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0, -1.0, -1.0, -1.0};
// //
float hAverage5[] = {0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,};
float hSharpen7[] = {-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, 49, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0};
//
float hAverage7[] = {1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49};
checkCudaErrors(hipMemcpy(dFilter, hAverage7, fsize, hipMemcpyHostToDevice));
// Allocate device memory for result
float *dData = NULL;
checkCudaErrors(hipMalloc((void **) &dData, size));
// Allocate array and copy image data
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *cuArray;
checkCudaErrors(hipMallocArray(&cuArray,
&channelDesc,
width,
height));
checkCudaErrors(hipMemcpyToArray(cuArray,
0,
0,
hData,
size,
hipMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
checkCudaErrors(hipBindTextureToArray(tex, cuArray, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// Warmup
//hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dData, width, height, angle);
checkCudaErrors(hipDeviceSynchronize());
// Execute the kernel
StopWatchInterface *timer2 = NULL;
sdkCreateTimer(&timer2);
sdkStartTimer(&timer2);
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dData, width, height, dFilter, masksize);
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&timer2);
    float t2 = sdkGetTimerValue(&timer2)/1000.0f;
printf("Texture memory took %f \n", t2);
sdkDeleteTimer(&timer2);
// Check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
printf("%.2f Mpixels/sec\n",
(width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
// Allocate mem for the result on host side
float *hOutputData = (float *) malloc(size);
// copy result from device to host
checkCudaErrors(hipMemcpy(hOutputData, dData, size, hipMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_TEXedge.pgm");
sdkSavePGM(outputFilename, hOutputData, width, height);
printf("Wrote '%s'\n", outputFilename);
sdkStopTimer(&timer);
float t = sdkGetTimerValue(&timer) / 1000.0f;
printf("Texture overhead took %f time \n", t-t2);
sdkDeleteTimer(&timer);
// Write regression file if necessary
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// Write file for regression test
sdkWriteFile<float>("./data/regression.dat",
hOutputData,
width*height,
0.0f,
false);
}
else
{
// We need to reload the data from disk,
// because it is inverted upon output
sdkLoadPGM(outputFilename, &hOutputData, &width, &height);
printf("Comparing files\n");
printf("\toutput: <%s>\n", outputFilename);
}
checkCudaErrors(hipFree(dData));
checkCudaErrors(hipFreeArray(cuArray));
free(imagePath);
// free(refPath);
}
| 3edaeaa6269317c30c60c66a6a0cfb15416861f3.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
 * This sample demonstrates how to use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
 * an output PGM image (image_filename_out). This CUDA kernel convolves the
 * input image with a filter mask, fetching neighboring pixels through a texture.
*/
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <cuda_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
// Define the files that are to be save and the reference images for validation
const char *imageFilename = "thanos.pgm";
const char *refFilename = "ref_rotated.pgm";
const char *sampleName = "simpleTexture";
////////////////////////////////////////////////////////////////////////////////
// Texture reference for 2D float texture
texture<float, 2, cudaReadModeElementType> tex;
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void transformKernel(float* dData, int width, int height, float* mask, int masksize)
{
// calculate normalized texture coordinates
unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
int S = (masksize-1)/2;
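    // S is the filter radius: each output pixel accumulates a masksize x masksize
    // neighborhood fetched through the texture (wrap addressing handles the borders,
    // and the 0.5f offset samples texel centers).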
float sum = 0.0;
int pixPos = row*width + col;
dData[pixPos] = 0.0;
for(int maskrow = -S; maskrow <= S; maskrow++){
for(int maskcol = -S; maskcol <= S; maskcol++){
int maskP = (maskrow+S)*masksize + (maskcol+S);
sum += mask[maskP] * tex2D(tex, (col+maskcol+0.5f)/(float)width, (row+maskrow+0.5f)/(float)height);
}
}
dData[pixPos] = sum;
if (dData[pixPos] < 0){
dData[pixPos] = 0;
}
else if(dData[pixPos] > 1){
dData[pixPos] = 1;
}
}
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", sampleName);
runTest(argc, argv);
cudaDeviceReset();
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
int devID = findCudaDevice(argc, (const char **) argv);
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
// int masksize = 3;
// int masksize = 5;
int masksize = 7;
float* dFilter = NULL;
int fsize = masksize*masksize*sizeof(float);
checkCudaErrors(cudaMalloc((void** )&dFilter,fsize));
float hEdge3[] = {-1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0};
float hSharpen3[] = {-1.0, -1.0, -1.0, -1.0, 9, -1.0, -1.0, -1.0, -1.0};
float hAverage3[] = {0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111};
//
float hSharpen5[] = {-1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0, 25, -1.0, -1.0,
-1.0, -1.0, -1.0, -1.0, -1.0,
-1.0, -1.0, -1.0, -1.0, -1.0};
// //
float hAverage5[] = {0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,};
float hSharpen7[] = {-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, 49, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0,
-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0};
//
float hAverage7[] = {1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49,
1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49};
checkCudaErrors(cudaMemcpy(dFilter, hAverage7, fsize, cudaMemcpyHostToDevice));
// Allocate device memory for result
float *dData = NULL;
checkCudaErrors(cudaMalloc((void **) &dData, size));
// Allocate array and copy image data
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *cuArray;
checkCudaErrors(cudaMallocArray(&cuArray,
&channelDesc,
width,
height));
checkCudaErrors(cudaMemcpyToArray(cuArray,
0,
0,
hData,
size,
cudaMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
checkCudaErrors(cudaBindTextureToArray(tex, cuArray, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// Warmup
// transformKernel<<<dimGrid, dimBlock, 0>>>(dData, width, height, angle);
checkCudaErrors(cudaDeviceSynchronize());
// Execute the kernel
StopWatchInterface *timer2 = NULL;
sdkCreateTimer(&timer2);
sdkStartTimer(&timer2);
transformKernel<<<dimGrid, dimBlock, 0>>>(dData, width, height, dFilter, masksize);
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&timer2);
    float t2 = sdkGetTimerValue(&timer2)/1000.0f;
printf("Texture memory took %f \n", t2);
sdkDeleteTimer(&timer2);
// Check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
printf("%.2f Mpixels/sec\n",
(width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
// Allocate mem for the result on host side
float *hOutputData = (float *) malloc(size);
// copy result from device to host
checkCudaErrors(cudaMemcpy(hOutputData, dData, size, cudaMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_TEXedge.pgm");
sdkSavePGM(outputFilename, hOutputData, width, height);
printf("Wrote '%s'\n", outputFilename);
sdkStopTimer(&timer);
float t = sdkGetTimerValue(&timer) / 1000.0f;
printf("Texture overhead took %f time \n", t-t2);
sdkDeleteTimer(&timer);
// Write regression file if necessary
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// Write file for regression test
sdkWriteFile<float>("./data/regression.dat",
hOutputData,
width*height,
0.0f,
false);
}
else
{
// We need to reload the data from disk,
// because it is inverted upon output
sdkLoadPGM(outputFilename, &hOutputData, &width, &height);
printf("Comparing files\n");
printf("\toutput: <%s>\n", outputFilename);
}
checkCudaErrors(cudaFree(dData));
checkCudaErrors(cudaFreeArray(cuArray));
free(imagePath);
// free(refPath);
}
|
0c9ae23e8c365ccd268324b2c8505576036ac3b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "incSumScan_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_outVals = NULL;
hipMalloc(&d_outVals, XSIZE*YSIZE);
unsigned int *d_inVals = NULL;
hipMalloc(&d_inVals, XSIZE*YSIZE);
size_t numVals = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((incSumScan_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_outVals,d_inVals,numVals);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((incSumScan_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_outVals,d_inVals,numVals);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((incSumScan_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_outVals,d_inVals,numVals);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0c9ae23e8c365ccd268324b2c8505576036ac3b8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "incSumScan_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_outVals = NULL;
cudaMalloc(&d_outVals, XSIZE*YSIZE);
unsigned int *d_inVals = NULL;
cudaMalloc(&d_inVals, XSIZE*YSIZE);
size_t numVals = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
incSumScan_kernel<<<gridBlock,threadBlock>>>(d_outVals,d_inVals,numVals);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
incSumScan_kernel<<<gridBlock,threadBlock>>>(d_outVals,d_inVals,numVals);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
incSumScan_kernel<<<gridBlock,threadBlock>>>(d_outVals,d_inVals,numVals);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cc27b1e8a106e51aa792a26325d0d7f3ced48efe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CS3700 Example matrix multpilcation using GPU
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define TILE_WIDTH 2
#define WIDTH 6
// Kernel function execute by the device (GPU)
__global__ void
product (float *d_a, float *d_b, float *d_c, const int n) {
int col = blockIdx.x * blockDim.x + threadIdx.x ;
int row = blockIdx.y * blockDim.y + threadIdx.y ;
float sum = 0;
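   // Each in-range thread computes one element of the product as the dot product
   // of its row of d_a with its column of d_b.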
if (row < n && col < n) {
for (int i = 0 ; i<n ; ++i) {
sum += d_a[row * n + i ] * d_b[i * n + col] ;
}
d_c[row * n + col] = sum;
}
}
__global__ void
sum (float *d_a, float *d_b, float *d_c, const int n) {
   int row = blockIdx.y * blockDim.y + threadIdx.y ;
   if (row < n) {
      for (int i = 0 ; i<n ; ++i) {
         d_c[row * n + i] = d_a[row * n + i] + d_b[row * n + i];
      }
   }
}
// Utility function to print the input matrix
void printMatrix (float m[][WIDTH]) {
int i, j;
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j< WIDTH; ++j) {
printf ("%d\t", (int)m[i][j]);
}
printf ("\n");
}
}
// Main function execute by the host (CPU)
int main () {
// host matrices
float host_a[WIDTH][WIDTH],
host_b[WIDTH][WIDTH],
host_c[WIDTH][WIDTH];
// device arrays
float *device_a, *device_b, *device_c;
int i, j;
// initialize host matrices using random numbers
time_t t;
srand ((unsigned) time(&t));
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j<WIDTH; j++) {
host_a[i][j] = (float) (rand() % 50);
host_b[i][j] = (float) (rand() % 50);
}
}
printf ("Matrix A:\n");
printMatrix (host_a);
printf ("\n");
printf ("Matrix B:\n");
printMatrix (host_b);
printf ("\n");
// allocate device memory for input matrices
size_t deviceSize = WIDTH * WIDTH * sizeof (float);
hipMalloc ((void **) &device_a, deviceSize);
hipMalloc ((void **) &device_b, deviceSize);
// copy host matrices to device
hipMemcpy (device_a, host_a, deviceSize, hipMemcpyHostToDevice );
hipMemcpy (device_b, host_b, deviceSize, hipMemcpyHostToDevice );
// allocate device memory to store computed result
hipMalloc((void **) &device_c, deviceSize) ;
dim3 dimBlock (WIDTH, WIDTH);
dim3 dimGrid (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
hipLaunchKernelGGL(( product), dim3(dimGrid), dim3(dimBlock), 0, 0, device_a, device_b, device_c, WIDTH);
// copy result from device back to host
hipMemcpy (host_c, device_c, deviceSize, hipMemcpyDeviceToHost);
// output the computed result matrix
printf ("A x B: \n");
printMatrix (host_c);
printf ("\n");
hipLaunchKernelGGL(( sum), dim3(dimGrid), dim3(dimBlock), 0, 0, device_a, device_b, device_c, WIDTH);
// copy result from device back to host
hipMemcpy (host_c, device_c, deviceSize, hipMemcpyDeviceToHost);
printf ("A + B: \n");
printMatrix (host_c);
printf ("\n");
hipFree (device_a);
hipFree (device_b);
hipFree (device_c);
return 0;
} | cc27b1e8a106e51aa792a26325d0d7f3ced48efe.cu | // CS3700 Example matrix multpilcation using GPU
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define TILE_WIDTH 2
#define WIDTH 6
// Kernel function execute by the device (GPU)
__global__ void
product (float *d_a, float *d_b, float *d_c, const int n) {
int col = blockIdx.x * blockDim.x + threadIdx.x ;
int row = blockIdx.y * blockDim.y + threadIdx.y ;
float sum = 0;
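   // Each in-range thread computes one element of the product as the dot product
   // of its row of d_a with its column of d_b.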
if (row < n && col < n) {
for (int i = 0 ; i<n ; ++i) {
sum += d_a[row * n + i ] * d_b[i * n + col] ;
}
d_c[row * n + col] = sum;
}
}
__global__ void
sum (float *d_a, float *d_b, float *d_c, const int n) {
   int row = blockIdx.y * blockDim.y + threadIdx.y ;
   if (row < n) {
      for (int i = 0 ; i<n ; ++i) {
         d_c[row * n + i] = d_a[row * n + i] + d_b[row * n + i];
      }
   }
}
// Utility function to print the input matrix
void printMatrix (float m[][WIDTH]) {
int i, j;
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j< WIDTH; ++j) {
printf ("%d\t", (int)m[i][j]);
}
printf ("\n");
}
}
// Main function execute by the host (CPU)
int main () {
// host matrices
float host_a[WIDTH][WIDTH],
host_b[WIDTH][WIDTH],
host_c[WIDTH][WIDTH];
// device arrays
float *device_a, *device_b, *device_c;
int i, j;
// initialize host matrices using random numbers
time_t t;
srand ((unsigned) time(&t));
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j<WIDTH; j++) {
host_a[i][j] = (float) (rand() % 50);
host_b[i][j] = (float) (rand() % 50);
}
}
printf ("Matrix A:\n");
printMatrix (host_a);
printf ("\n");
printf ("Matrix B:\n");
printMatrix (host_b);
printf ("\n");
// allocate device memory for input matrices
size_t deviceSize = WIDTH * WIDTH * sizeof (float);
cudaMalloc ((void **) &device_a, deviceSize);
cudaMalloc ((void **) &device_b, deviceSize);
// copy host matrices to device
cudaMemcpy (device_a, host_a, deviceSize, cudaMemcpyHostToDevice );
cudaMemcpy (device_b, host_b, deviceSize, cudaMemcpyHostToDevice );
// allocate device memory to store computed result
cudaMalloc((void **) &device_c, deviceSize) ;
dim3 dimBlock (WIDTH, WIDTH);
dim3 dimGrid (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
product<<<dimGrid, dimBlock>>> (device_a, device_b, device_c, WIDTH);
// copy result from device back to host
cudaMemcpy (host_c, device_c, deviceSize, cudaMemcpyDeviceToHost);
// output the computed result matrix
printf ("A x B: \n");
printMatrix (host_c);
printf ("\n");
sum<<<dimGrid, dimBlock>>> (device_a, device_b, device_c, WIDTH);
// copy result from device back to host
cudaMemcpy (host_c, device_c, deviceSize, cudaMemcpyDeviceToHost);
printf ("A + B: \n");
printMatrix (host_c);
printf ("\n");
cudaFree (device_a);
cudaFree (device_b);
cudaFree (device_c);
return 0;
} |
26a0f9a6b3ef7c87c5769e83dc62c7e045c0ac08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "common_hip.cuh"
namespace ML {
namespace fil {
using namespace MLCommon;
// vec wraps float[N] for hipcub::BlockReduce
template <int N>
struct vec {
float data[N];
__host__ __device__ float& operator[](int i) { return data[i]; }
__host__ __device__ float operator[](int i) const { return data[i]; }
friend __host__ __device__ vec<N> operator+(const vec<N>& a,
const vec<N>& b) {
vec<N> r;
#pragma unroll
for (int i = 0; i < N; ++i) r[i] = a[i] + b[i];
return r;
}
};
template <int NITEMS, typename tree_type>
__device__ __forceinline__ void infer_one_tree(tree_type tree, float* sdata,
int cols, vec<NITEMS>& out) {
int curr[NITEMS];
int mask = (1 << NITEMS) - 1; // all active
for (int j = 0; j < NITEMS; ++j) curr[j] = 0;
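  // Walk NITEMS rows down this tree in lockstep; bit j of `mask` stays set until
  // row j reaches a leaf node.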
do {
#pragma unroll
for (int j = 0; j < NITEMS; ++j) {
      if (((mask >> j) & 1) == 0) continue;
auto n = tree[curr[j]];
if (n.is_leaf()) {
mask &= ~(1 << j);
continue;
}
float val = sdata[j * cols + n.fid()];
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr[j] = n.left(curr[j]) + cond;
}
} while (mask != 0);
#pragma unroll
for (int j = 0; j < NITEMS; ++j) out[j] += tree[curr[j]].output();
}
template <typename tree_type>
__device__ __forceinline__ void infer_one_tree(tree_type tree, float* sdata,
int cols, vec<1>& out) {
int curr = 0;
for (;;) {
auto n = tree[curr];
if (n.is_leaf()) break;
float val = sdata[n.fid()];
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr = n.left(curr) + cond;
}
out[0] = tree[curr].output();
}
template <int NITEMS, typename storage_type>
__global__ void infer_k(storage_type forest, predict_params params) {
// cache the row for all threads to reuse
extern __shared__ char smem[];
float* sdata = (float*)smem;
size_t rid = blockIdx.x * NITEMS;
for (int j = 0; j < NITEMS; ++j) {
for (int i = threadIdx.x; i < params.num_cols; i += blockDim.x) {
size_t row = rid + j;
sdata[j * params.num_cols + i] =
row < params.num_rows ? params.data[row * params.num_cols + i] : 0.0f;
}
}
__syncthreads();
// one block works on NITEMS rows and the whole forest
vec<NITEMS> out;
for (int i = 0; i < NITEMS; ++i) out[i] = 0.0f;
for (int j = threadIdx.x; j < forest.num_trees(); j += blockDim.x) {
infer_one_tree<NITEMS>(forest[j], sdata, params.num_cols, out);
}
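  // Each thread has summed the outputs of its subset of trees; reduce across the
  // block so thread 0 holds the whole forest's prediction for all NITEMS rows.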
using BlockReduce = hipcub::BlockReduce<vec<NITEMS>, FIL_TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
out = BlockReduce(tmp_storage).Sum(out);
if (threadIdx.x == 0) {
for (int i = 0; i < NITEMS; ++i) {
int row = blockIdx.x * NITEMS + i;
if (row < params.num_rows)
params.preds[row * params.num_output_classes] = out[i];
}
}
}
template <typename storage_type>
void infer(storage_type forest, predict_params params, hipStream_t stream) {
const int MAX_BATCH_ITEMS = 4;
params.max_items =
params.algo == algo_t::BATCH_TREE_REORG ? MAX_BATCH_ITEMS : 1;
int num_items = params.max_shm / (sizeof(float) * params.num_cols);
if (num_items == 0) {
int max_cols = params.max_shm / sizeof(float);
ASSERT(false, "p.num_cols == %d: too many features, only %d allowed",
params.num_cols, max_cols);
}
num_items = ::min(num_items, params.max_items);
int num_blocks = ceildiv(int(params.num_rows), num_items);
int shm_sz = num_items * sizeof(float) * params.num_cols;
switch (num_items) {
case 1:
hipLaunchKernelGGL(( infer_k<1>), dim3(num_blocks), dim3(FIL_TPB), shm_sz, stream, forest, params);
break;
case 2:
hipLaunchKernelGGL(( infer_k<2>), dim3(num_blocks), dim3(FIL_TPB), shm_sz, stream, forest, params);
break;
case 3:
hipLaunchKernelGGL(( infer_k<3>), dim3(num_blocks), dim3(FIL_TPB), shm_sz, stream, forest, params);
break;
case 4:
hipLaunchKernelGGL(( infer_k<4>), dim3(num_blocks), dim3(FIL_TPB), shm_sz, stream, forest, params);
break;
default:
ASSERT(false, "internal error: nitems > 4");
}
CUDA_CHECK(hipPeekAtLastError());
}
template void infer<dense_storage>(dense_storage forest, predict_params params,
hipStream_t stream);
template void infer<sparse_storage>(sparse_storage forest,
predict_params params, hipStream_t stream);
} // namespace fil
} // namespace ML
| 26a0f9a6b3ef7c87c5769e83dc62c7e045c0ac08.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "common.cuh"
namespace ML {
namespace fil {
using namespace MLCommon;
// vec wraps float[N] for cub::BlockReduce
template <int N>
struct vec {
float data[N];
__host__ __device__ float& operator[](int i) { return data[i]; }
__host__ __device__ float operator[](int i) const { return data[i]; }
friend __host__ __device__ vec<N> operator+(const vec<N>& a,
const vec<N>& b) {
vec<N> r;
#pragma unroll
for (int i = 0; i < N; ++i) r[i] = a[i] + b[i];
return r;
}
};
template <int NITEMS, typename tree_type>
__device__ __forceinline__ void infer_one_tree(tree_type tree, float* sdata,
int cols, vec<NITEMS>& out) {
int curr[NITEMS];
int mask = (1 << NITEMS) - 1; // all active
for (int j = 0; j < NITEMS; ++j) curr[j] = 0;
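  // Walk NITEMS rows down this tree in lockstep; bit j of `mask` stays set until
  // row j reaches a leaf node.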
do {
#pragma unroll
for (int j = 0; j < NITEMS; ++j) {
      if (((mask >> j) & 1) == 0) continue;
auto n = tree[curr[j]];
if (n.is_leaf()) {
mask &= ~(1 << j);
continue;
}
float val = sdata[j * cols + n.fid()];
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr[j] = n.left(curr[j]) + cond;
}
} while (mask != 0);
#pragma unroll
for (int j = 0; j < NITEMS; ++j) out[j] += tree[curr[j]].output();
}
template <typename tree_type>
__device__ __forceinline__ void infer_one_tree(tree_type tree, float* sdata,
int cols, vec<1>& out) {
int curr = 0;
for (;;) {
auto n = tree[curr];
if (n.is_leaf()) break;
float val = sdata[n.fid()];
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr = n.left(curr) + cond;
}
out[0] = tree[curr].output();
}
template <int NITEMS, typename storage_type>
__global__ void infer_k(storage_type forest, predict_params params) {
// cache the row for all threads to reuse
extern __shared__ char smem[];
float* sdata = (float*)smem;
size_t rid = blockIdx.x * NITEMS;
for (int j = 0; j < NITEMS; ++j) {
for (int i = threadIdx.x; i < params.num_cols; i += blockDim.x) {
size_t row = rid + j;
sdata[j * params.num_cols + i] =
row < params.num_rows ? params.data[row * params.num_cols + i] : 0.0f;
}
}
__syncthreads();
// one block works on NITEMS rows and the whole forest
vec<NITEMS> out;
for (int i = 0; i < NITEMS; ++i) out[i] = 0.0f;
for (int j = threadIdx.x; j < forest.num_trees(); j += blockDim.x) {
infer_one_tree<NITEMS>(forest[j], sdata, params.num_cols, out);
}
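  // Each thread has summed the outputs of its subset of trees; reduce across the
  // block so thread 0 holds the whole forest's prediction for all NITEMS rows.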
using BlockReduce = cub::BlockReduce<vec<NITEMS>, FIL_TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
out = BlockReduce(tmp_storage).Sum(out);
if (threadIdx.x == 0) {
for (int i = 0; i < NITEMS; ++i) {
int row = blockIdx.x * NITEMS + i;
if (row < params.num_rows)
params.preds[row * params.num_output_classes] = out[i];
}
}
}
template <typename storage_type>
void infer(storage_type forest, predict_params params, cudaStream_t stream) {
const int MAX_BATCH_ITEMS = 4;
params.max_items =
params.algo == algo_t::BATCH_TREE_REORG ? MAX_BATCH_ITEMS : 1;
int num_items = params.max_shm / (sizeof(float) * params.num_cols);
if (num_items == 0) {
int max_cols = params.max_shm / sizeof(float);
ASSERT(false, "p.num_cols == %d: too many features, only %d allowed",
params.num_cols, max_cols);
}
num_items = std::min(num_items, params.max_items);
int num_blocks = ceildiv(int(params.num_rows), num_items);
int shm_sz = num_items * sizeof(float) * params.num_cols;
switch (num_items) {
case 1:
infer_k<1><<<num_blocks, FIL_TPB, shm_sz, stream>>>(forest, params);
break;
case 2:
infer_k<2><<<num_blocks, FIL_TPB, shm_sz, stream>>>(forest, params);
break;
case 3:
infer_k<3><<<num_blocks, FIL_TPB, shm_sz, stream>>>(forest, params);
break;
case 4:
infer_k<4><<<num_blocks, FIL_TPB, shm_sz, stream>>>(forest, params);
break;
default:
ASSERT(false, "internal error: nitems > 4");
}
CUDA_CHECK(cudaPeekAtLastError());
}
template void infer<dense_storage>(dense_storage forest, predict_params params,
cudaStream_t stream);
template void infer<sparse_storage>(sparse_storage forest,
predict_params params, cudaStream_t stream);
} // namespace fil
} // namespace ML
|
248e0b0b27edb61b746b4545a0a320e2ed1721c9.hip | // !!! This is a file automatically generated by hipify!!!
// clang-format off
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
//#include "../../graph_parser/parse.h"
#include <hip/hip_runtime_api.h>
#include "../../graph_parser/util.h"
#include "../../graph_parser/parse.cpp"
#include "../../graph_parser/util.cpp"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *cc_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
// Create buffers for cc
err = hipMalloc(&cc_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc cc_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
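    // Raise the device heap limit up front; the init kernels below presumably build
    // the ChiVertex objects with device-side new, which the default heap cannot hold
    // for large graphs.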
double timer3 = gettime();
ChiVertex<int, int> **vertex;
GraphChiContext *context;
err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&context, sizeof(GraphChiContext));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
hipLaunchKernelGGL(( initContext), dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initObj\n");
hipLaunchKernelGGL(( initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d, inrow_d,
incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
    // Run CC for a fixed number of iterations. TODO: add convergence detection
for (int i = 0; i < ITER; i++) {
printf("Start ConnectedComponent\n");
hipLaunchKernelGGL(( ConnectedComponent), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
printf("Finish ConnectedComponent\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, cc_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(cc_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
| 248e0b0b27edb61b746b4545a0a320e2ed1721c9.cu | // clang-format off
/************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR"�) (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
//#include "../../graph_parser/parse.h"
#include <cuda_runtime_api.h>
#include "../../graph_parser/util.h"
#include "../../graph_parser/parse.cpp"
#include "../../graph_parser/util.cpp"
#include "kernel.cu"
// Iteration count
#define ITER 20
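// Editorial sketch (not used by the code below): the repeated manual
// "err != cudaSuccess" checks in main() follow a pattern that a small helper
// macro captures. Guarded so it cannot clash with a macro of the same name
// defined by the included benchmark headers.
#ifndef CUDA_CHECK
#define CUDA_CHECK(call) \
    do { \
        cudaError_t e_ = (call); \
        if (e_ != cudaSuccess) { \
            fprintf(stderr, "CUDA error: %s at %s:%d\n", \
                    cudaGetErrorString(e_), __FILE__, __LINE__); \
            return -1; \
        } \
    } while (0)
#endif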
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *cc_d;
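    // CSR layout assumed by the buffers below (illustrative example using the
    // common CSR convention; see parse.cpp for the exact arrays): for a 3-node
    // graph with edges 0->1, 0->2, 2->1,
    //   row_array = {0, 2, 2, ...}   // per-node offsets into col_array
    //   col_array = {1, 2, 1}        // destination node of each edge
    // and inrow_array/incol_array describe the same graph by incoming edges.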
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
// Create buffers for cc
err = cudaMalloc(&cc_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc cc_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n",
                num_edges, cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
ChiVertex<int, int> **vertex;
GraphChiContext *context;
err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&context, sizeof(GraphChiContext));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n",
                (int)sizeof(GraphChiContext), cudaGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
initContext<<<1, 1>>>(context, num_nodes, num_edges);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initObj\n");
initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d,
incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
    // Run CC for a fixed number of iterations. TODO: convergence-based termination
for (int i = 0; i < ITER; i++) {
printf("Start ConnectedComponent\n");
ConnectedComponent<<<grid, threads>>>(vertex, context, i);
printf("Finish ConnectedComponent\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, cc_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(cc_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
7e5338ccbb744fe05a82ef823a4383bc2c934d0e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "./my_cuda_header.h"
#define BLOCKSIZE 16
//Kernel that performs Matrix Vector Multiplication
__global__ void MatrixVectorMultiplication(double *matrixA,
double *matrixB,
double *matrixC,
int i_total,
int j_total,
int k_total,
int NumberofProcessors)
{
for (int i = 0; i < i_total / NumberofProcessors + 1; i++)
{
for (int k = 0; k < k_total; k++)
{
double sum = 0.0;
for (int j = 0; j < j_total; j++)
sum += matrixA[i * j_total + j] * matrixB[j * k_total + k];
matrixC[i * k_total + k] = sum;
}
}
__syncthreads();
}
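// Editorial note on the decomposition (illustrative numbers): each MPI rank is
// given a contiguous block of roughly i_total / NumberofProcessors rows of A
// plus all of B, and the kernel above computes that rank's rows of C. For
// i_total = 8 and 2 ranks: rank 0 covers rows 0..3, rank 1 covers rows 4..7.
// The "+ 1" in the outer loop handles a remainder when i_total is not evenly
// divisible, but it also runs one extra row in the evenly divisible case, so
// the buffers are assumed to be padded accordingly by the caller.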
void implement_cuda_code(int mpi_rank, int mpi_processors,
int i_total, int j_total, int k_total,
double *device_matrixA,
double *device_matrixB,
double *device_matrixC,
double *host_matrixA,
double *host_matrixB,
double *host_matrixC,
int threads_per_block, int number_of_blocks)
{
hipSetDevice(mpi_rank);
    //Allocate pinned (page-locked) host memory via hipHostMalloc for the matrix buffers
hipHostMalloc( (void **)&device_matrixA,
i_total * j_total / mpi_processors * sizeof(double),
hipHostMallocDefault );
hipHostMalloc( (void **)&device_matrixB,
j_total * k_total*sizeof(double),
hipHostMallocDefault);
hipHostMalloc( (void **)&device_matrixC,
i_total * k_total / mpi_processors * sizeof(double),
hipHostMallocDefault );
//Copying the data from host to device
hipMemcpyAsync( (void *)device_matrixA,
(void *)host_matrixA,
i_total * j_total / mpi_processors * sizeof(double),
hipMemcpyHostToDevice );
hipMemcpyAsync( (void *)device_matrixB,
(void *)host_matrixB,
j_total * k_total * sizeof(double),
hipMemcpyHostToDevice );
hipSetDevice(mpi_rank);
//Calling the kernel which performs Matrix Vector Product
hipLaunchKernelGGL(( MatrixVectorMultiplication), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, device_matrixA, device_matrixB, device_matrixC, i_total, j_total, k_total, mpi_processors);
    //Copying the value of partial result vector from device to host
hipMemcpy( (void *)host_matrixC,
(void *)device_matrixC,
i_total * k_total / mpi_processors * sizeof(double),
hipMemcpyDeviceToHost );
}
void free_device_memory(double **device_matrixA,
double **device_matrixB,
double **device_matrixC)
{
hipFree( *device_matrixA );
hipFree( *device_matrixB );
hipFree( *device_matrixC );
}
| 7e5338ccbb744fe05a82ef823a4383bc2c934d0e.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include "./my_cuda_header.h"
#define BLOCKSIZE 16
//Kernel that performs Matrix Vector Multiplication
__global__ void MatrixVectorMultiplication(double *matrixA,
double *matrixB,
double *matrixC,
int i_total,
int j_total,
int k_total,
int NumberofProcessors)
{
for (int i = 0; i < i_total / NumberofProcessors + 1; i++)
{
for (int k = 0; k < k_total; k++)
{
double sum = 0.0;
for (int j = 0; j < j_total; j++)
sum += matrixA[i * j_total + j] * matrixB[j * k_total + k];
matrixC[i * k_total + k] = sum;
}
}
__syncthreads();
}
void implement_cuda_code(int mpi_rank, int mpi_processors,
int i_total, int j_total, int k_total,
double *device_matrixA,
double *device_matrixB,
double *device_matrixC,
double *host_matrixA,
double *host_matrixB,
double *host_matrixC,
int threads_per_block, int number_of_blocks)
{
cudaSetDevice(mpi_rank);
    //Allocate pinned (page-locked) host memory via cudaHostAlloc for the matrix buffers
cudaHostAlloc( (void **)&device_matrixA,
i_total * j_total / mpi_processors * sizeof(double),
cudaHostAllocDefault );
cudaHostAlloc( (void **)&device_matrixB,
j_total * k_total*sizeof(double),
cudaHostAllocDefault);
cudaHostAlloc( (void **)&device_matrixC,
i_total * k_total / mpi_processors * sizeof(double),
cudaHostAllocDefault );
//Copying the data from host to device
cudaMemcpyAsync( (void *)device_matrixA,
(void *)host_matrixA,
i_total * j_total / mpi_processors * sizeof(double),
cudaMemcpyHostToDevice );
cudaMemcpyAsync( (void *)device_matrixB,
(void *)host_matrixB,
j_total * k_total * sizeof(double),
cudaMemcpyHostToDevice );
cudaSetDevice(mpi_rank);
//Calling the kernel which performs Matrix Vector Product
MatrixVectorMultiplication<<<number_of_blocks, threads_per_block>>>(device_matrixA, device_matrixB, device_matrixC, i_total, j_total, k_total, mpi_processors);
    //Copying the value of partial result vector from device to host
cudaMemcpy( (void *)host_matrixC,
(void *)device_matrixC,
i_total * k_total / mpi_processors * sizeof(double),
cudaMemcpyDeviceToHost );
}
void free_device_memory(double **device_matrixA,
double **device_matrixB,
double **device_matrixC)
{
cudaFree( *device_matrixA );
cudaFree( *device_matrixB );
cudaFree( *device_matrixC );
}
|
9b47b01a3b781e46ea57f049e5190ced725fb72e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/BucketizationUtils.h>
#include <ATen/native/Resize.h>
namespace at {
namespace native {
// Implement a numpy like searchsorted and a TF like bucketize function running on cuda
// See details in ATen/native/Bucketization.cpp
namespace {
template<typename input_t>
__device__ int64_t lower_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val >= val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t>
__device__ int64_t upper_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val > val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
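// Worked example (illustrative values): for sorted boundaries {1, 3, 3, 5} and
// val = 3, lower_bound returns index 1 (first element >= 3) and upper_bound
// returns index 3 (first element > 3), matching numpy.searchsorted with
// side='left' and side='right' respectively.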
template<typename input_t, typename output_t>
__global__ void searchsorted_cuda_kernel(
output_t *data_out,
const input_t *data_in,
const input_t *data_bd,
const int64_t *data_sort,
int64_t idim_in,
int64_t idim_bd,
int64_t numel_in,
bool right,
bool is_1d_boundaries) {
for (int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numel_in; tid += blockDim.x * gridDim.x) {
// If boundaries tensor is 1d, we always search the entire boundary tensor
int64_t start_bd = is_1d_boundaries ? 0 : tid / idim_in * idim_bd;
int64_t end_bd = start_bd + idim_bd;
int64_t pos = !right ?
lower_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd :
upper_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd;
// type conversion might happen here
data_out[tid] = pos;
}
}
template<typename input_t, typename output_t>
void searchsorted_cuda_contiguous(Tensor& result, const Tensor& input, const Tensor& boundaries, const bool& right, const Tensor& sorter) {
int64_t numel_in = input.numel();
bool is_scalar_input = input.dim() == 0 && numel_in == 1;
// inner most dim size of input and boundaries
int64_t idim_in = is_scalar_input ? 1 : input.sizes().back();
int64_t idim_bd = boundaries.sizes().back();
const input_t *data_in = input.data_ptr<input_t>();
const input_t *data_bd = boundaries.data_ptr<input_t>();
const int64_t *data_sort = sorter.defined() ? sorter.data_ptr<int64_t>() : nullptr;
output_t *data_out = result.data_ptr<output_t>();
int64_t maxThread = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
int64_t maxGrid = 1024;
dim3 block = dim3(::min(maxThread, numel_in));
dim3 grid = dim3(::min(maxGrid, ceil_div<int64_t>(numel_in, block.x)));
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( searchsorted_cuda_kernel), dim3(grid), dim3(block), 0, stream,
data_out, data_in, data_bd, data_sort, idim_in, idim_bd, numel_in, right, boundaries.dim() == 1);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
void dispatch(
Tensor& result,
const Tensor& input,
const Tensor& boundaries,
bool out_int32,
bool right,
const Tensor& sorter) {
if (!out_int32) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int64_t>(result, input, boundaries, right, sorter);
});
}
else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int>(result, input, boundaries, right, sorter);
});
}
}
}
Tensor& searchsorted_out_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter_opt,
Tensor& result) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> sorter_maybe_owned = at::borrow_from_optional_tensor(sorter_opt);
const Tensor& sorter = *sorter_maybe_owned;
searchsorted_pre_check(sorted_sequence, self, result, out_int32, right, side_opt, sorter);
resize_output(result, self.sizes());
// we have two inputs to set right, pre_check checks that they aren't set to opposites
bool is_right = (side_opt && *side_opt == "right") || right;
if (self.numel() == 0) {
return result;
}
  // for non-contiguous result tensors, we write the output to a contiguous copy so we can later copy back, maintaining the original result tensor
Tensor out = result;
if (!result.is_contiguous()) {
out = result.contiguous();
}
if (sorted_sequence.is_contiguous() && self.is_contiguous() && sorted_sequence.dtype() == self.dtype() && sorter.is_contiguous()) {
dispatch(out, self, sorted_sequence, out_int32, is_right, sorter);
}
else {
Tensor trimmed_input;
Tensor trimmed_boundaries;
Tensor trimmed_sorter;
searchsorted_maybe_trim_input_tensors(trimmed_input, trimmed_boundaries, trimmed_sorter, self, sorted_sequence, sorter);
const Tensor& final_input = trimmed_input.defined() ? trimmed_input : self;
const Tensor& final_boundaries = trimmed_boundaries.defined() ? trimmed_boundaries : sorted_sequence;
const Tensor& final_sorter = trimmed_sorter.defined() ? trimmed_sorter : sorter;
dispatch(out, final_input, final_boundaries, out_int32, is_right, final_sorter);
}
// if result is non-contiguous, we wrote the answer to a copied version, so we copy back to the original result tensor
if (!result.is_contiguous()) {
result.copy_(out);
}
return result;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::searchsorted_out_cuda(sorted_sequence, self, out_int32, right, side_opt, sorter, result);
return result;
}
// See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml
Tensor _torch_cuda_cu_linker_symbol_op_cuda(const Tensor& self) {
return self;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Scalar& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device());
return searchsorted_cuda(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter);
}
Tensor& bucketize_out_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right, Tensor& result) {
TORCH_CHECK(boundaries.dim() == 1, "boundaries tensor must be 1 dimension, but got dim(", boundaries.dim(), ")");
at::native::searchsorted_out_cuda(boundaries, self, out_int32, right, nullopt, nullopt, result);
return result;
}
Tensor bucketize_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::bucketize_out_cuda(self, boundaries, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Scalar& self, const Tensor& boundaries, bool out_int32, bool right) {
return bucketize_cuda(searchsorted_scalar_tensor(self, boundaries.device()), boundaries, out_int32, right);
}
}} // namespace at::native
| 9b47b01a3b781e46ea57f049e5190ced725fb72e.cu | #include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/BucketizationUtils.h>
#include <ATen/native/Resize.h>
namespace at {
namespace native {
// Implement a numpy like searchsorted and a TF like bucketize function running on cuda
// See details in ATen/native/Bucketization.cpp
namespace {
template<typename input_t>
__device__ int64_t lower_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val >= val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t>
__device__ int64_t upper_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val > val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t, typename output_t>
__global__ void searchsorted_cuda_kernel(
output_t *data_out,
const input_t *data_in,
const input_t *data_bd,
const int64_t *data_sort,
int64_t idim_in,
int64_t idim_bd,
int64_t numel_in,
bool right,
bool is_1d_boundaries) {
for (int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numel_in; tid += blockDim.x * gridDim.x) {
// If boundaries tensor is 1d, we always search the entire boundary tensor
int64_t start_bd = is_1d_boundaries ? 0 : tid / idim_in * idim_bd;
int64_t end_bd = start_bd + idim_bd;
int64_t pos = !right ?
lower_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd :
upper_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd;
// type conversion might happen here
data_out[tid] = pos;
}
}
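// Editorial note: the loop above is a grid-stride loop, so correctness does not
// require one thread per element. For example (illustrative sizes), with
// numel_in = 10000 and 8 blocks of 256 threads (stride 2048), thread 0 handles
// elements 0, 2048, 4096, 6144 and 8192; the grid/block clamping done in
// searchsorted_cuda_contiguous only affects performance.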
template<typename input_t, typename output_t>
void searchsorted_cuda_contiguous(Tensor& result, const Tensor& input, const Tensor& boundaries, const bool& right, const Tensor& sorter) {
int64_t numel_in = input.numel();
bool is_scalar_input = input.dim() == 0 && numel_in == 1;
// inner most dim size of input and boundaries
int64_t idim_in = is_scalar_input ? 1 : input.sizes().back();
int64_t idim_bd = boundaries.sizes().back();
const input_t *data_in = input.data_ptr<input_t>();
const input_t *data_bd = boundaries.data_ptr<input_t>();
const int64_t *data_sort = sorter.defined() ? sorter.data_ptr<int64_t>() : nullptr;
output_t *data_out = result.data_ptr<output_t>();
int64_t maxThread = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
int64_t maxGrid = 1024;
dim3 block = dim3(std::min(maxThread, numel_in));
dim3 grid = dim3(std::min(maxGrid, ceil_div<int64_t>(numel_in, block.x)));
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
searchsorted_cuda_kernel<<<grid, block, 0, stream>>>(
data_out, data_in, data_bd, data_sort, idim_in, idim_bd, numel_in, right, boundaries.dim() == 1);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
void dispatch(
Tensor& result,
const Tensor& input,
const Tensor& boundaries,
bool out_int32,
bool right,
const Tensor& sorter) {
if (!out_int32) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int64_t>(result, input, boundaries, right, sorter);
});
}
else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int>(result, input, boundaries, right, sorter);
});
}
}
}
Tensor& searchsorted_out_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter_opt,
Tensor& result) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> sorter_maybe_owned = at::borrow_from_optional_tensor(sorter_opt);
const Tensor& sorter = *sorter_maybe_owned;
searchsorted_pre_check(sorted_sequence, self, result, out_int32, right, side_opt, sorter);
resize_output(result, self.sizes());
// we have two inputs to set right, pre_check checks that they aren't set to opposites
bool is_right = (side_opt && *side_opt == "right") || right;
if (self.numel() == 0) {
return result;
}
  // for non-contiguous result tensors, we write the output to a contiguous copy so we can later copy back, maintaining the original result tensor
Tensor out = result;
if (!result.is_contiguous()) {
out = result.contiguous();
}
if (sorted_sequence.is_contiguous() && self.is_contiguous() && sorted_sequence.dtype() == self.dtype() && sorter.is_contiguous()) {
dispatch(out, self, sorted_sequence, out_int32, is_right, sorter);
}
else {
Tensor trimmed_input;
Tensor trimmed_boundaries;
Tensor trimmed_sorter;
searchsorted_maybe_trim_input_tensors(trimmed_input, trimmed_boundaries, trimmed_sorter, self, sorted_sequence, sorter);
const Tensor& final_input = trimmed_input.defined() ? trimmed_input : self;
const Tensor& final_boundaries = trimmed_boundaries.defined() ? trimmed_boundaries : sorted_sequence;
const Tensor& final_sorter = trimmed_sorter.defined() ? trimmed_sorter : sorter;
dispatch(out, final_input, final_boundaries, out_int32, is_right, final_sorter);
}
// if result is non-contiguous, we wrote the answer to a copied version, so we copy back to the original result tensor
if (!result.is_contiguous()) {
result.copy_(out);
}
return result;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::searchsorted_out_cuda(sorted_sequence, self, out_int32, right, side_opt, sorter, result);
return result;
}
// See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml
Tensor _torch_cuda_cu_linker_symbol_op_cuda(const Tensor& self) {
return self;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Scalar& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device());
return searchsorted_cuda(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter);
}
Tensor& bucketize_out_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right, Tensor& result) {
TORCH_CHECK(boundaries.dim() == 1, "boundaries tensor must be 1 dimension, but got dim(", boundaries.dim(), ")");
at::native::searchsorted_out_cuda(boundaries, self, out_int32, right, nullopt, nullopt, result);
return result;
}
Tensor bucketize_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::bucketize_out_cuda(self, boundaries, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Scalar& self, const Tensor& boundaries, bool out_int32, bool right) {
return bucketize_cuda(searchsorted_scalar_tensor(self, boundaries.device()), boundaries, out_int32, right);
}
}} // namespace at::native
|
2b035101c4d746eab0d6cbbee9b9a626fd33d352.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
#define MATRIX_SIZE 1024
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size,
hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Initialize timing
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
// Get Time
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Time elapsed: %f\n", milliseconds);
// Read C from device memory
hipMemcpy(C.elements, d_C.elements, size,
hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e]
* B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
void initMatrix(Matrix *m) {
m->height = MATRIX_SIZE;
m->width = MATRIX_SIZE;
m->elements = (float *) malloc(m->height * m->width * sizeof(float));
}
void randomMatrix(Matrix m) {
for (int i = 0; i < m.height; i++) {
for (int j = 0; j < m.width; j++) {
m.elements[i*m.width + j] = rand();
}
}
}
int main() {
Matrix A, B, C;
initMatrix(&A);
initMatrix(&B);
initMatrix(&C);
randomMatrix(A);
randomMatrix(B);
MatMul(A, B, C);
return 0;
}
| 2b035101c4d746eab0d6cbbee9b9a626fd33d352.cu | #include <stdio.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
#define MATRIX_SIZE 1024
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Initialize timing
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Get Time
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Time elapsed: %f\n", milliseconds);
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e]
* B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
void initMatrix(Matrix *m) {
m->height = MATRIX_SIZE;
m->width = MATRIX_SIZE;
m->elements = (float *) malloc(m->height * m->width * sizeof(float));
}
void randomMatrix(Matrix m) {
for (int i = 0; i < m.height; i++) {
for (int j = 0; j < m.width; j++) {
m.elements[i*m.width + j] = rand();
}
}
}
int main() {
Matrix A, B, C;
initMatrix(&A);
initMatrix(&B);
initMatrix(&C);
randomMatrix(A);
randomMatrix(B);
MatMul(A, B, C);
return 0;
}
|
2164a269a7e1c881c8b43143fc80c8beb5d235ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
// Variables
float* h_A; // host vectors
float* h_C;
float* d_A; // device vectors
float* d_C;
// Functions
void RandomInit(float*, int);
__global__ void FindMax(const float*, float*, int);
// Host Code
int main(){
// Settings
// gid -> GPU device id (0, 1, ...)
// err -> error message get from CUDA calls
// N -> Length of an array
// size -> memory size of the allocate array
// sb -> memory size after handle by GPU
// sm -> size of shared memory in each individual block
// m -> the power of threadsPerBlock
// threadsPerBlock, blocksPerGrid -> For launching kernel
//
// start, stop -> CUDA event timer
// Intime -> Calculate the input time, allocate and move data in device memory
// gputime -> Time spent in GPU only
// Outime -> Time used to handle the rest of finding maximum
// gputime_tot -> Time total spent
//
// max_value -> Maximum value inside this array, find by GPU
// max_value_CPU -> Maximum value inside this array, find by CPU
int gid;
hipError_t err;
int N;
int size, sb;
int sm;
int m, threadsPerBlock, blocksPerGrid;
hipEvent_t start, stop;
float Intime, gputime, Outime, gputime_tot, cputime;
float max_value = -2.0;
float max_value_CPU = -2.0;
// Select GPU device
printf("Select the GPU with device ID: ");
scanf("%d", &gid);
err = hipSetDevice(gid);
if (err != hipSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Set GPU with device ID = %d\n", gid);
// Set the size of the vector
printf("Find maximum value within one array:\n");
printf("Enter the length of an array: ");
scanf("%d", &N);
printf("Array length = %d\n", N);
// Set blocksize and grid size
printf("Enter the power (m) of threads per block (2^m): ");
scanf("%d", &m);
threadsPerBlock = pow(2, m);
printf("Threads per block = %d\n", threadsPerBlock);
if(threadsPerBlock > 1024){
printf("The number of threads per block must be less than 1024 (2^m , m <=10) ! \n");
exit(0);
}
printf("Enter the number of blocks per grid: ");
scanf("%d",&blocksPerGrid);
printf("Blocks per grid = %d\n", blocksPerGrid);
if( blocksPerGrid > 2147483647 ) {
printf("The number of blocks must be less than 2147483647 ! \n");
exit(0);
}
// Allocate input array
size = N * sizeof(float);
sb = blocksPerGrid * sizeof(float);
h_A = (float*)malloc(size);
h_C = (float*)malloc(sb);
// Initialize input vectors
RandomInit(h_A, N);
// Create the timer
hipEventCreate(&start);
hipEventCreate(&stop);
// Start the timer: Record allocate memory and move data, from host to device
hipEventRecord(start, 0);
// Allocate the array in device memory
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_C, sb);
// Copy array from host to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
// Stop the timer: Record allocate memory and move data, from host to device
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Calculate spend time: Record allocate memory and move data, from host to device
hipEventElapsedTime(&Intime, start, stop);
printf("=================================\n");
printf("Allocate memory and move data from host to device time spent for GPU: %f (ms) \n", Intime);
// start the timer
hipEventRecord(start, 0);
// Called the kernel
sm = threadsPerBlock * sizeof(float);
hipLaunchKernelGGL(( FindMax) , dim3(blocksPerGrid), dim3(threadsPerBlock), sm , 0, d_A, d_C, N);
// stop the timer
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
    // Calculate elapsed time: FindMax kernel execution time
hipEventElapsedTime(&gputime, start, stop);
printf("Time used for GPU: %f (ms) \n", gputime);
// start the timer
hipEventRecord(start,0);
// Copy result from device memory to host memory
// h_C contains the result of each block in host memory
hipMemcpy(h_C, d_C, sb, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_C);
for(int i = 0; i < blocksPerGrid; i = i+1){
if(h_C[i] > max_value){
max_value = h_C[i];
}
}
// stop the timer
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime( &Outime, start, stop);
printf("Time used to handle the rest of finding maximum: %f (ms) \n", Outime);
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n", gputime_tot);
// start the timer
hipEventRecord(start,0);
// Compute the reference solution
for(int i = 0; i < N; i = i+1){
if(h_A[i] > max_value_CPU){
max_value_CPU = h_A[i];
}
}
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cputime, start, stop);
printf("---------------------------------\n");
printf("Processing time for CPU: %f (ms) \n",cputime);
printf("=================================\n");
printf("Speed up of GPU = %f\n", cputime/(gputime_tot));
// Destroy the timer
hipEventDestroy(start);
hipEventDestroy(stop);
// Check the result
printf("=================================\n");
printf("Check the result:\n");
printf("Maximum find by GPU = %.23f\n", max_value);
printf("Maximum find by CPU = %.23f\n", max_value_CPU);
free(h_A);
free(h_C);
hipDeviceReset();
return 0;
}
__global__ void FindMax(const float* A, float* C, int N){
extern __shared__ float cache[];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float max = -2.0;
while (i < N) {
if(A[i] > max){
max = A[i];
}
i = i + blockDim.x * gridDim.x;
}
cache[cacheIndex] = max;
__syncthreads();
// Perform parallel reduction, threadsPerBlock must be 2^m
int ib = blockDim.x/2;
while (ib != 0) {
if(cacheIndex < ib){
if(cache[cacheIndex] < cache[cacheIndex + ib]){
cache[cacheIndex] = cache[cacheIndex + ib];
}
}
__syncthreads();
ib = ib / 2;
}
if(cacheIndex == 0){
C[blockIdx.x] = cache[0];
}
}
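// Worked trace of the in-block reduction above (illustrative, blockDim.x = 8):
// cache = {a0,...,a7}; ib = 4 -> threads 0..3 store max(a_i, a_{i+4});
// ib = 2 -> threads 0..1 combine those partial maxima; ib = 1 -> thread 0
// holds the block maximum written to C[blockIdx.x]. Halving ib like this is
// why main() only accepts power-of-two block sizes.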
// Allocates an array with random float entries in (-1,1)
void RandomInit(float* data, int n){
for (int i = 0; i < n; ++i)
data[i] = 2.0*rand()/(float)RAND_MAX - 1.0;
} | 2164a269a7e1c881c8b43143fc80c8beb5d235ed.cu | #include <stdio.h>
#include <stdlib.h>
// Variables
float* h_A; // host vectors
float* h_C;
float* d_A; // device vectors
float* d_C;
// Functions
void RandomInit(float*, int);
__global__ void FindMax(const float*, float*, int);
// Host Code
int main(){
// Settings
// gid -> GPU device id (0, 1, ...)
// err -> error message get from CUDA calls
// N -> Length of an array
// size -> memory size of the allocate array
// sb -> memory size after handle by GPU
// sm -> size of shared memory in each individual block
// m -> the power of threadsPerBlock
// threadsPerBlock, blocksPerGrid -> For launching kernel
//
// start, stop -> CUDA event timer
// Intime -> Calculate the input time, allocate and move data in device memory
// gputime -> Time spent in GPU only
// Outime -> Time used to handle the rest of finding maximum
// gputime_tot -> Time total spent
//
// max_value -> Maximum value inside this array, find by GPU
// max_value_CPU -> Maximum value inside this array, find by CPU
int gid;
cudaError_t err;
int N;
int size, sb;
int sm;
int m, threadsPerBlock, blocksPerGrid;
cudaEvent_t start, stop;
float Intime, gputime, Outime, gputime_tot, cputime;
float max_value = -2.0;
float max_value_CPU = -2.0;
// Select GPU device
printf("Select the GPU with device ID: ");
scanf("%d", &gid);
err = cudaSetDevice(gid);
if (err != cudaSuccess) {
printf("!!! Cannot select GPU with device ID = %d\n", gid);
exit(1);
}
printf("Set GPU with device ID = %d\n", gid);
// Set the size of the vector
printf("Find maximum value within one array:\n");
printf("Enter the length of an array: ");
scanf("%d", &N);
printf("Array length = %d\n", N);
// Set blocksize and grid size
printf("Enter the power (m) of threads per block (2^m): ");
scanf("%d", &m);
threadsPerBlock = pow(2, m);
printf("Threads per block = %d\n", threadsPerBlock);
if(threadsPerBlock > 1024){
printf("The number of threads per block must be less than 1024 (2^m , m <=10) ! \n");
exit(0);
}
printf("Enter the number of blocks per grid: ");
scanf("%d",&blocksPerGrid);
printf("Blocks per grid = %d\n", blocksPerGrid);
if( blocksPerGrid > 2147483647 ) {
printf("The number of blocks must be less than 2147483647 ! \n");
exit(0);
}
// Allocate input array
size = N * sizeof(float);
sb = blocksPerGrid * sizeof(float);
h_A = (float*)malloc(size);
h_C = (float*)malloc(sb);
// Initialize input vectors
RandomInit(h_A, N);
// Create the timer
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start the timer: Record allocate memory and move data, from host to device
cudaEventRecord(start, 0);
// Allocate the array in device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_C, sb);
// Copy array from host to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
// Stop the timer: Record allocate memory and move data, from host to device
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Calculate spend time: Record allocate memory and move data, from host to device
cudaEventElapsedTime(&Intime, start, stop);
printf("=================================\n");
printf("Allocate memory and move data from host to device time spent for GPU: %f (ms) \n", Intime);
// start the timer
cudaEventRecord(start, 0);
// Called the kernel
sm = threadsPerBlock * sizeof(float);
FindMax <<< blocksPerGrid, threadsPerBlock, sm >>>(d_A, d_C, N);
// stop the timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
    // Calculate elapsed time: FindMax kernel execution time
cudaEventElapsedTime(&gputime, start, stop);
printf("Time used for GPU: %f (ms) \n", gputime);
// start the timer
cudaEventRecord(start,0);
// Copy result from device memory to host memory
// h_C contains the result of each block in host memory
cudaMemcpy(h_C, d_C, sb, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_C);
for(int i = 0; i < blocksPerGrid; i = i+1){
if(h_C[i] > max_value){
max_value = h_C[i];
}
}
// stop the timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &Outime, start, stop);
printf("Time used to handle the rest of finding maximum: %f (ms) \n", Outime);
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n", gputime_tot);
// start the timer
cudaEventRecord(start,0);
// Compute the reference solution
for(int i = 0; i < N; i = i+1){
if(h_A[i] > max_value_CPU){
max_value_CPU = h_A[i];
}
}
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cputime, start, stop);
printf("---------------------------------\n");
printf("Processing time for CPU: %f (ms) \n",cputime);
printf("=================================\n");
printf("Speed up of GPU = %f\n", cputime/(gputime_tot));
// Destroy the timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Check the result
printf("=================================\n");
printf("Check the result:\n");
printf("Maximum find by GPU = %.23f\n", max_value);
printf("Maximum find by CPU = %.23f\n", max_value_CPU);
free(h_A);
free(h_C);
cudaDeviceReset();
return 0;
}
__global__ void FindMax(const float* A, float* C, int N){
extern __shared__ float cache[];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float max = -2.0;
while (i < N) {
if(A[i] > max){
max = A[i];
}
i = i + blockDim.x * gridDim.x;
}
cache[cacheIndex] = max;
__syncthreads();
// Perform parallel reduction, threadsPerBlock must be 2^m
int ib = blockDim.x/2;
while (ib != 0) {
if(cacheIndex < ib){
if(cache[cacheIndex] < cache[cacheIndex + ib]){
cache[cacheIndex] = cache[cacheIndex + ib];
}
}
__syncthreads();
ib = ib / 2;
}
if(cacheIndex == 0){
C[blockIdx.x] = cache[0];
}
}
// Allocates an array with random float entries in (-1,1)
void RandomInit(float* data, int n){
for (int i = 0; i < n; ++i)
data[i] = 2.0*rand()/(float)RAND_MAX - 1.0;
} |
c1cd406dc1b9b96fbc88807b4502b2b95987acdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Solving the 2D acoustic wave equation using explicit finite
* difference method
* Copyright 2018 Chaiwoot Boonyasiriwat. All rights reserved.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
//Kernel
__global__ void Wcalculate(float *u0,float *u1,float *u2,float C2,int nx,int ny){
int i=blockDim.x*blockIdx.x+threadIdx.x;
int j=blockDim.y*blockIdx.y+threadIdx.y;
if( i>0 && i<nx-1 && j>0 && j<ny-1){
u2[i+j*nx] = (2.0f-4.0f*C2)*u1[i+j*nx] - u0[i+j*nx] + C2*(u1[(i+1)+j*nx]+u1[(i-1)+j*nx] + u1[i+(j+1)*nx]+u1[i+(j-1)*nx]);
}
}
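// Editorial note: the update in Wcalculate is the standard explicit scheme for
// u_tt = v^2 (u_xx + u_yy) on a uniform grid with dx = dy:
//   u2 = 2*u1 - u0 + C^2 * (u1_left + u1_right + u1_down + u1_up - 4*u1),
// which matches the (2 - 4*C^2) form used above. With the values in main(),
// dx = 1/200 = 0.005 and C = 0.1 * 0.035 / 0.005 = 0.70, just inside the usual
// 2D stability bound C <= 1/sqrt(2) ~ 0.707.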
__global__ void Wupdate(float *u0,float *u1,float *u2,int nx,int ny){
int i=blockDim.x*blockIdx.x+threadIdx.x;
int j=blockDim.y*blockIdx.y+threadIdx.y;
if( i<nx && j<ny){
u0[i+j*nx] = u1[i+j*nx];
u1[i+j*nx] = u2[i+j*nx];
}
}
int main() {
//allocate parameter
size_t size;
hipEvent_t start, stop;
int nx, ny, nt, ix, iy, it, indx;
float v, dx, dt, C, C2, xmax, ymax, a;
float *u0_h, *u1_h, *u2_h;
//set value
xmax = 1.0f;
ymax = 1.0f;
nx = 201;
ny = 201;
v = 0.1f;
dx = xmax/(nx-1);
dt = 0.035f;
C = v*dt/dx;
C2 = C*C;
nt = 1000;
a = 1000.0;
size = nx*ny*sizeof(float);
u0_h = (float*) malloc(size);
u1_h = (float*) malloc(size);
u2_h = (float*) malloc(size);
float *u0_cu = NULL;
hipMalloc((void**)&u0_cu,size);
float *u1_cu = NULL;
hipMalloc((void**)&u1_cu,size);
float *u2_cu = NULL;
hipMalloc((void**)&u2_cu,size);
//initial u0 u1
for (iy=0; iy<ny; iy++) {
float yy = iy*dx - 0.5*ymax;
for (ix=0; ix<nx; ix++) {
indx = ix+iy*nx;
float xx = ix*dx - 0.5*xmax;
u0_h[indx] = exp(-a*(pow(xx,2)+pow(yy,2)));
u1_h[indx] = u0_h[indx];
u2_h[indx] = 0;
}
}
    //copy u0 -> u0_cu, u1 -> u1_cu, u2 -> u2_cu
hipMemcpy(u0_cu, u0_h, size,hipMemcpyHostToDevice);
hipMemcpy(u1_cu, u1_h, size,hipMemcpyHostToDevice);
hipMemcpy(u2_cu, u2_h, size,hipMemcpyHostToDevice);
//start wave calculation looping time
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
dim3 G(nx/32 +1,ny/32 +1);
dim3 B(32,32);
for (it=0; it<nt; it++) {
// advance wavefields at inner nodes
hipLaunchKernelGGL(( Wcalculate), dim3(G),dim3(B), 0, 0, u0_cu, u1_cu, u2_cu, C2, nx, ny);
// update
hipLaunchKernelGGL(( Wupdate), dim3(G),dim3(B), 0, 0, u0_cu, u1_cu, u2_cu, nx, ny);
}
hipMemcpy(u2_h, u2_cu, size,hipMemcpyDeviceToHost);
//end calculation
hipEventRecord(stop,0);
hipEventSynchronize(stop);
    float gpu_time;
    hipEventElapsedTime(&gpu_time,start,stop);
    printf("GPU time = %lf s\n", gpu_time*0.001);
// output the final snapshot
FILE *file = fopen("u_cu.dat","w");
fwrite(u2_h, sizeof(float), nx*ny, file);
fclose(file);
// Free memory
free(u0_h);
free(u1_h);
free(u2_h);
    hipFree(u0_cu);
    hipFree(u1_cu);
    hipFree(u2_cu);
return 0;
}
| c1cd406dc1b9b96fbc88807b4502b2b95987acdc.cu | /* Solving the 2D acoustic wave equation using explicit finite
* difference method
* Copyright 2018 Chaiwoot Boonyasiriwat. All rights reserved.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
//Kernel
__global__ void Wcalculate(float *u0,float *u1,float *u2,float C2,int nx,int ny){
int i=blockDim.x*blockIdx.x+threadIdx.x;
int j=blockDim.y*blockIdx.y+threadIdx.y;
if( i>0 && i<nx-1 && j>0 && j<ny-1){
u2[i+j*nx] = (2.0f-4.0f*C2)*u1[i+j*nx] - u0[i+j*nx] + C2*(u1[(i+1)+j*nx]+u1[(i-1)+j*nx] + u1[i+(j+1)*nx]+u1[i+(j-1)*nx]);
}
}
__global__ void Wupdate(float *u0,float *u1,float *u2,int nx,int ny){
int i=blockDim.x*blockIdx.x+threadIdx.x;
int j=blockDim.y*blockIdx.y+threadIdx.y;
if( i<nx && j<ny){
u0[i+j*nx] = u1[i+j*nx];
u1[i+j*nx] = u2[i+j*nx];
}
}
int main() {
//allocate parameter
size_t size;
cudaEvent_t start, stop;
int nx, ny, nt, ix, iy, it, indx;
float v, dx, dt, C, C2, xmax, ymax, a;
float *u0_h, *u1_h, *u2_h;
//set value
xmax = 1.0f;
ymax = 1.0f;
nx = 201;
ny = 201;
v = 0.1f;
dx = xmax/(nx-1);
dt = 0.035f;
C = v*dt/dx;
C2 = C*C;
nt = 1000;
a = 1000.0;
size = nx*ny*sizeof(float);
u0_h = (float*) malloc(size);
u1_h = (float*) malloc(size);
u2_h = (float*) malloc(size);
float *u0_cu = NULL;
cudaMalloc((void**)&u0_cu,size);
float *u1_cu = NULL;
cudaMalloc((void**)&u1_cu,size);
float *u2_cu = NULL;
cudaMalloc((void**)&u2_cu,size);
//initial u0 u1
for (iy=0; iy<ny; iy++) {
float yy = iy*dx - 0.5*ymax;
for (ix=0; ix<nx; ix++) {
indx = ix+iy*nx;
float xx = ix*dx - 0.5*xmax;
u0_h[indx] = exp(-a*(pow(xx,2)+pow(yy,2)));
u1_h[indx] = u0_h[indx];
u2_h[indx] = 0;
}
}
    //copy u0 -> u0_cu, u1 -> u1_cu, u2 -> u2_cu
cudaMemcpy(u0_cu, u0_h, size,cudaMemcpyHostToDevice);
cudaMemcpy(u1_cu, u1_h, size,cudaMemcpyHostToDevice);
cudaMemcpy(u2_cu, u2_h, size,cudaMemcpyHostToDevice);
//start wave calculation looping time
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
dim3 G(nx/32 +1,ny/32 +1);
dim3 B(32,32);
for (it=0; it<nt; it++) {
// advance wavefields at inner nodes
Wcalculate<<<G,B>>>(u0_cu, u1_cu, u2_cu, C2, nx, ny);
// update
Wupdate<<<G,B>>>(u0_cu, u1_cu, u2_cu, nx, ny);
}
cudaMemcpy(u2_h, u2_cu, size,cudaMemcpyDeviceToHost);
//end calculation
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
    float gpu_time;
    cudaEventElapsedTime(&gpu_time,start,stop);
    printf("GPU time = %lf s\n", gpu_time*0.001);
// output the final snapshot
FILE *file = fopen("u_cu.dat","w");
fwrite(u2_h, sizeof(float), nx*ny, file);
fclose(file);
// Free memory
free(u0_h);
free(u1_h);
free(u2_h);
    cudaFree(u0_cu);
    cudaFree(u1_cu);
    cudaFree(u2_cu);
return 0;
}
|
cc2e6c4df01dc778839fcfa8878f9fbaf1a2b4bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void mapping(double *point_cloud,
const double *img1, const double *img2,
const double *img3, const double *img4,
const double *T,
const double *P1, const double *P2, const double *P3, const double *P4,
const double *ratiox, const double *ratioy, const double *ratioz,
const int *img_width, const int *img_height)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
int idz = threadIdx.z;
int width = gridDim.x*blockDim.x;
int height = gridDim.y*blockDim.y;
int index = idz*width*height + idx*height + idy;
//grid to coordinates
double mx = idx*ratiox[0];
double my = idy*ratioy[0];
double mz = idz*ratioz[0];
//world coordinates
double x = T[0]*mx + T[4]*my + T[8]*mz + T[12];
double y = T[1]*mx + T[5]*my + T[9]*mz + T[13];
double z = mz;
//image coordinates
double u1 = P1[0]*x + P1[3]*y + P1[6]*z + P1[9];
double v1 = P1[1]*x + P1[4]*y + P1[7]*z + P1[10];
double norm1 = P1[2]*x + P1[5]*y + P1[8]*z + P1[11];
u1/=norm1;
v1/=norm1;
double u2 = P2[0]*x + P2[3]*y + P2[6]*z + P2[9];
double v2 = P2[1]*x + P2[4]*y + P2[7]*z + P2[10];
double norm2 = P2[2]*x + P2[5]*y + P2[8]*z + P2[11];
u2/=norm2;
v2/=norm2;
double u3 = P3[0]*x + P3[3]*y + P3[6]*z + P3[9];
double v3 = P3[1]*x + P3[4]*y + P3[7]*z + P3[10];
double norm3 = P3[2]*x + P3[5]*y + P3[8]*z + P3[11];
u3/=norm3;
v3/=norm3;
double u4 = P4[0]*x + P4[3]*y + P4[6]*z + P4[9];
double v4 = P4[1]*x + P4[4]*y + P4[7]*z + P4[10];
double norm4 = P4[2]*x + P4[5]*y + P4[8]*z + P4[11];
u4/=norm4;
v4/=norm4;
int u11 = (u1);
int u22 = (u2);
int u33 = (u3);
int u44 = (u4);
int v11 = (v1);
int v22 = (v2);
int v33 = (v3);
int v44 = (v4);
int final_width = img_width[0];
int final_height = img_height[0];
int seen_record[4] = {0};
//decide the point cloud
if((u11>0)&&(u11<final_width)&&(v11>0)&&(v11<final_height))
seen_record[0] = 1;
if((u22>0)&&(u22<final_width)&&(v22>0)&&(v22<final_height))
seen_record[1] = 1;
if((u33>0)&&(u33<final_width)&&(v33>0)&&(v33<final_height))
seen_record[2] = 1;
if((u44>0)&&(u44<final_width)&&(v44>0)&&(v44<final_height))
seen_record[3] = 1;
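//vote across the four views: count how many cameras observe a non-zero pixel at this voxel's projection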
int sum = 0;
if((seen_record[0]==1)&&(img1[u11*final_height+v11]!=0))
++sum;
if((seen_record[1]==1)&&(img2[u22*final_height+v22]!=0))
++sum;
if((seen_record[2]==1)&&(img3[u33*final_height+v33]!=0))
++sum;
if((seen_record[3]==1)&&(img4[u44*final_height+v44]!=0))
++sum;
point_cloud[index] = 0;
if(sum>=3)
{
point_cloud[index] = 1;
}
} | cc2e6c4df01dc778839fcfa8878f9fbaf1a2b4bd.cu |
__global__ void mapping(double *point_cloud,
const double *img1, const double *img2,
const double *img3, const double *img4,
const double *T,
const double *P1, const double *P2, const double *P3, const double *P4,
const double *ratiox, const double *ratioy, const double *ratioz,
const int *img_width, const int *img_height)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
int idz = threadIdx.z;
int width = gridDim.x*blockDim.x;
int height = gridDim.y*blockDim.y;
int index = idz*width*height + idx*height + idy;
//grid to coordinates
double mx = idx*ratiox[0];
double my = idy*ratioy[0];
double mz = idz*ratioz[0];
//world coordinates
double x = T[0]*mx + T[4]*my + T[8]*mz + T[12];
double y = T[1]*mx + T[5]*my + T[9]*mz + T[13];
double z = mz;
//image coordinates
double u1 = P1[0]*x + P1[3]*y + P1[6]*z + P1[9];
double v1 = P1[1]*x + P1[4]*y + P1[7]*z + P1[10];
double norm1 = P1[2]*x + P1[5]*y + P1[8]*z + P1[11];
u1/=norm1;
v1/=norm1;
double u2 = P2[0]*x + P2[3]*y + P2[6]*z + P2[9];
double v2 = P2[1]*x + P2[4]*y + P2[7]*z + P2[10];
double norm2 = P2[2]*x + P2[5]*y + P2[8]*z + P2[11];
u2/=norm2;
v2/=norm2;
double u3 = P3[0]*x + P3[3]*y + P3[6]*z + P3[9];
double v3 = P3[1]*x + P3[4]*y + P3[7]*z + P3[10];
double norm3 = P3[2]*x + P3[5]*y + P3[8]*z + P3[11];
u3/=norm3;
v3/=norm3;
double u4 = P4[0]*x + P4[3]*y + P4[6]*z + P4[9];
double v4 = P4[1]*x + P4[4]*y + P4[7]*z + P4[10];
double norm4 = P4[2]*x + P4[5]*y + P4[8]*z + P4[11];
u4/=norm4;
v4/=norm4;
int u11 = (u1);
int u22 = (u2);
int u33 = (u3);
int u44 = (u4);
int v11 = (v1);
int v22 = (v2);
int v33 = (v3);
int v44 = (v4);
int final_width = img_width[0];
int final_height = img_height[0];
int seen_record[4] = {0};
//decide the point cloud
if((u11>0)&&(u11<final_width)&&(v11>0)&&(v11<final_height))
seen_record[0] = 1;
if((u22>0)&&(u22<final_width)&&(v22>0)&&(v22<final_height))
seen_record[1] = 1;
if((u33>0)&&(u33<final_width)&&(v33>0)&&(v33<final_height))
seen_record[2] = 1;
if((u44>0)&&(u44<final_width)&&(v44>0)&&(v44<final_height))
seen_record[3] = 1;
int sum = 0;
if((seen_record[0]==1)&&(img1[u11*final_height+v11]!=0))
++sum;
if((seen_record[1]==1)&&(img2[u22*final_height+v22]!=0))
++sum;
if((seen_record[2]==1)&&(img3[u33*final_height+v33]!=0))
++sum;
if((seen_record[3]==1)&&(img4[u44*final_height+v44]!=0))
++sum;
point_cloud[index] = 0;
if(sum>=3)
{
point_cloud[index] = 1;
}
} |
43131419c1624cfcbbc84a2ecf748cd0a196bd33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <device_launch_parameters.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Gtype, typename Wtype>
__global__ void SGDRegUpdateAllAndClear(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
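// momentum SGD with weight decay: h = momentum*h + lr*(g + decay*reg), w -= h  (reg = w for L2, sign(w) for L1)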
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, half>(int N,
half* g, half* w, half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz = __float2half(0.F);  // zero gradient value used when clear_grads is set
CUDA_KERNEL_LOOP(i, N) {
float wf = __half2float(w[i]);
float gf = __half2float(g[i]);
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gf += reg * local_decay;
gf = hf = momentum * hf + local_rate * gf;
wf -= gf;
h[i] = float2half_clip(hf);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gf);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, float>(int N,
half* g, float* w, float* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz = __float2half(0.F);  // zero gradient value used when clear_grads is set
CUDA_KERNEL_LOOP(i, N) {
float reg = reg_L2 ? w[i] : (0.F < w[i]) - (w[i] < 0.F);
float gr = __half2float(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? hz : float2half_clip(h[i]);
}
}
template<typename Gtype, typename Wtype>
void sgd_reg_update_all_and_clear_gpu(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SGDRegUpdateAllAndClear), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N,
g, w, h,
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template void sgd_reg_update_all_and_clear_gpu<float16, double>(int, float16*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float>(int, float*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, double>(int, float*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float16>(int, float*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float>(int, double*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, double>(int, double*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float16>(int, double*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float16>(int N,
float16* g, float16* w, float16* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SGDRegUpdateAllAndClear), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N,
reinterpret_cast<half*>(g), reinterpret_cast<half*>(w), reinterpret_cast<half*>(h),
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float>(int N,
float16* g, float* w, float* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SGDRegUpdateAllAndClear), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
N, reinterpret_cast<half*>(g), w, h, momentum, local_rate,
local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
} // namespace caffe
| 43131419c1624cfcbbc84a2ecf748cd0a196bd33.cu | #include <string>
#include <device_launch_parameters.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Gtype, typename Wtype>
__global__ void SGDRegUpdateAllAndClear(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, half>(int N,
half* g, half* w, half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz = __float2half(0.F);  // zero gradient value used when clear_grads is set
CUDA_KERNEL_LOOP(i, N) {
float wf = __half2float(w[i]);
float gf = __half2float(g[i]);
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gf += reg * local_decay;
gf = hf = momentum * hf + local_rate * gf;
wf -= gf;
h[i] = float2half_clip(hf);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gf);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, float>(int N,
half* g, float* w, float* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz = __float2half(0.F);  // zero gradient value used when clear_grads is set
CUDA_KERNEL_LOOP(i, N) {
float reg = reg_L2 ? w[i] : (0.F < w[i]) - (w[i] < 0.F);
float gr = __half2float(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? hz : float2half_clip(h[i]);
}
}
template<typename Gtype, typename Wtype>
void sgd_reg_update_all_and_clear_gpu(int N,
Gtype* g, Wtype* w, Wtype* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
SGDRegUpdateAllAndClear<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N,
g, w, h,
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template void sgd_reg_update_all_and_clear_gpu<float16, double>(int, float16*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float>(int, float*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, double>(int, float*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float16>(int, float*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float>(int, double*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, double>(int, double*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float16>(int, double*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float16>(int N,
float16* g, float16* w, float16* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
SGDRegUpdateAllAndClear<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N,
reinterpret_cast<half*>(g), reinterpret_cast<half*>(w), reinterpret_cast<half*>(h),
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float>(int N,
float16* g, float* w, float* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
SGDRegUpdateAllAndClear<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>
(N, reinterpret_cast<half*>(g), w, h, momentum, local_rate,
local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
} // namespace caffe
|
d39fb7d401ca03ceeb1f2f4fe7a2fc76c33726a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
/**
* @brief sample from a uniform distribution
*/
float nn::generateUniformRandom(float range)
{
return range - float(rand()) / float(RAND_MAX);
}
// void nn::apply_grad(float* param, float* grad, float lr, int param_fdim, hipblasHandle_t handle)
// {
// float nlr = -lr;
// hipblasStatus_t stat = hipblasSaxpy(handle, param_fdim, &nlr, grad, 1, param, 1);
// if(stat != HIPBLAS_STATUS_SUCCESS)
// {
// printf("hipblasSaxpy failed\n");
// }
// }
void nn::apply_grad(float* param, float* grad, float lr, int param_fdim)
{
hipLaunchKernelGGL(( cuda_apply_grad), dim3(64),dim3(64), 0, 0, param, grad, lr, param_fdim);
}
__global__ void nn::cuda_apply_grad(float* param, float* grad, float lr, int N)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
int size = blockDim.x * gridDim.x;
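// each thread updates its contiguous slice [N*pos/size, N*(pos+1)/size) of the parameter vector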
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
param[idx] -= lr * grad[idx];
}
}
| d39fb7d401ca03ceeb1f2f4fe7a2fc76c33726a2.cu | #include "common.h"
/**
* @brief sample from a uniform distribution
*/
float nn::generateUniformRandom(float range)
{
return range - float(rand()) / float(RAND_MAX);
}
// void nn::apply_grad(float* param, float* grad, float lr, int param_fdim, cublasHandle_t handle)
// {
// float nlr = -lr;
// cublasStatus_t stat = cublasSaxpy(handle, param_fdim, &nlr, grad, 1, param, 1);
// if(stat != CUBLAS_STATUS_SUCCESS)
// {
// printf("cublasSaxpy failed\n");
// }
// }
void nn::apply_grad(float* param, float* grad, float lr, int param_fdim)
{
cuda_apply_grad<<<64,64>>>(param, grad, lr, param_fdim);
}
__global__ void nn::cuda_apply_grad(float* param, float* grad, float lr, int N)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
param[idx] -= lr * grad[idx];
}
}
|
be27877ac6a9cbd46d54063b86d190c7831e48aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <kernels/gpu/cast.h>
#include <core/tensor_builder.h>
#include <global/operator_factory.h>
#include "global/fp16_operator_factory.h"
#include <backend/name.h>
#include <utils/assert.h>
#include <core/device.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "kernels/gpu/gpu_kernel.h"
/////////////////////////////////////////////////
namespace ts {
namespace gpu {
template<typename T_IN, typename T_OUT>
static __global__ void gpu_cast_kernel(T_OUT * dst, const T_IN * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = static_cast<T_OUT>(src[index]);
}
}
template<typename T_IN>
static __global__ void gpu_cast_kernel(half * dst, const T_IN * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = __float2half(static_cast<float>(src[index]));
}
}
template<typename T_OUT>
static __global__ void gpu_cast_kernel(T_OUT * dst, const half * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = static_cast<T_OUT>(__half2float(src[index]));
}
}
static __global__ void gpu_cast_kernel(half * dst, const half * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = src[index];
}
}
template<typename T_IN, typename T_OUT>
static void gpu_cast_compute_run_template(const Tensor &x, Tensor &out) {
const T_IN *psrc = x.data<T_IN>();
T_OUT *pdst = out.data<T_OUT>();
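// identical source and destination dtypes: no conversion needed, copy the raw buffer directly between devices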
if (x.dtype() == out.dtype()) {
memcpy((void*)pdst, out.device(), x.count() * sizeof(T_IN),
(void*)psrc, x.device(), x.count() * sizeof(T_IN));
return;
}
RUN_KERNEL(gpu_cast_kernel, CUDA_BLOCK(x.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
pdst, psrc, x.count());
}
template<typename T_IN>
static void gpu_cast_compute_run(const Tensor &x, DTYPE to_type, Tensor &out) {
switch (to_type) {
#define DECLARE_COMPUTE_RUN_TEMPLATE(DTYPE, TYPE) \
case DTYPE: { gpu_cast_compute_run_template<T_IN, TYPE>(x, out); break; }
DECLARE_COMPUTE_RUN_TEMPLATE(INT8, int8_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT8, uint8_t);
DECLARE_COMPUTE_RUN_TEMPLATE(INT16, int16_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT16, uint16_t);
DECLARE_COMPUTE_RUN_TEMPLATE(INT32, int32_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT32, uint32_t);
DECLARE_COMPUTE_RUN_TEMPLATE(INT64, int64_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT64, uint64_t);
DECLARE_COMPUTE_RUN_TEMPLATE(FLOAT32, float);
DECLARE_COMPUTE_RUN_TEMPLATE(FLOAT64, double);
DECLARE_COMPUTE_RUN_TEMPLATE(FLOAT16, half);
DECLARE_COMPUTE_RUN_TEMPLATE(BOOLEAN, uint8_t);
#undef DECLARE_COMPUTE_RUN_TEMPLATE
default: {
TS_LOG_ERROR << "_cast not support data type(" << to_type << "): " << type_str(to_type) << eject;
break;
}
}
}
void CastV2::cast(const Tensor &x, DTYPE to_type, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = x.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_cast_compute_run<TYPE>(x, to_type, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
DECLARE_COMPUTE_RUN(FLOAT16, half);
DECLARE_COMPUTE_RUN(BOOLEAN, uint8_t);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
/////////////////////////////////////////////////
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(CastV2, GPU, name::layer::cast())
TS_REGISTER_FP16_OPERATOR(CastV2, ts::GPU, name::layer::cast())
| be27877ac6a9cbd46d54063b86d190c7831e48aa.cu | #include <kernels/gpu/cast.h>
#include <core/tensor_builder.h>
#include <global/operator_factory.h>
#include "global/fp16_operator_factory.h"
#include <backend/name.h>
#include <utils/assert.h>
#include <core/device.h>
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "kernels/gpu/gpu_kernel.h"
/////////////////////////////////////////////////
namespace ts {
namespace gpu {
template<typename T_IN, typename T_OUT>
static __global__ void gpu_cast_kernel(T_OUT * dst, const T_IN * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = static_cast<T_OUT>(src[index]);
}
}
template<typename T_IN>
static __global__ void gpu_cast_kernel(half * dst, const T_IN * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = __float2half(static_cast<float>(src[index]));
}
}
template<typename T_OUT>
static __global__ void gpu_cast_kernel(T_OUT * dst, const half * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = static_cast<T_OUT>(__half2float(src[index]));
}
}
static __global__ void gpu_cast_kernel(half * dst, const half * src, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
dst[index] = src[index];
}
}
template<typename T_IN, typename T_OUT>
static void gpu_cast_compute_run_template(const Tensor &x, Tensor &out) {
const T_IN *psrc = x.data<T_IN>();
T_OUT *pdst = out.data<T_OUT>();
if (x.dtype() == out.dtype()) {
memcpy((void*)pdst, out.device(), x.count() * sizeof(T_IN),
(void*)psrc, x.device(), x.count() * sizeof(T_IN));
return;
}
RUN_KERNEL(gpu_cast_kernel, CUDA_BLOCK(x.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
pdst, psrc, x.count());
}
template<typename T_IN>
static void gpu_cast_compute_run(const Tensor &x, DTYPE to_type, Tensor &out) {
switch (to_type) {
#define DECLARE_COMPUTE_RUN_TEMPLATE(DTYPE, TYPE) \
case DTYPE: { gpu_cast_compute_run_template<T_IN, TYPE>(x, out); break; }
DECLARE_COMPUTE_RUN_TEMPLATE(INT8, int8_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT8, uint8_t);
DECLARE_COMPUTE_RUN_TEMPLATE(INT16, int16_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT16, uint16_t);
DECLARE_COMPUTE_RUN_TEMPLATE(INT32, int32_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT32, uint32_t);
DECLARE_COMPUTE_RUN_TEMPLATE(INT64, int64_t);
DECLARE_COMPUTE_RUN_TEMPLATE(UINT64, uint64_t);
DECLARE_COMPUTE_RUN_TEMPLATE(FLOAT32, float);
DECLARE_COMPUTE_RUN_TEMPLATE(FLOAT64, double);
DECLARE_COMPUTE_RUN_TEMPLATE(FLOAT16, half);
DECLARE_COMPUTE_RUN_TEMPLATE(BOOLEAN, uint8_t);
#undef DECLARE_COMPUTE_RUN_TEMPLATE
default: {
TS_LOG_ERROR << "_cast not support data type(" << to_type << "): " << type_str(to_type) << eject;
break;
}
}
}
void CastV2::cast(const Tensor &x, DTYPE to_type, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = x.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_cast_compute_run<TYPE>(x, to_type, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
DECLARE_COMPUTE_RUN(FLOAT16, half);
DECLARE_COMPUTE_RUN(BOOLEAN, uint8_t);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
/////////////////////////////////////////////////
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(CastV2, GPU, name::layer::cast())
TS_REGISTER_FP16_OPERATOR(CastV2, ts::GPU, name::layer::cast())
|
9a5c275b72da12888a1204a7553563ad6601094a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <clover_field_order.h>
#include <tune_quda.h>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
/**
Kernel argument struct
*/
template <typename Out, typename In>
struct CopyCloverArg {
Out out;
const In in;
int volumeCB;
CopyCloverArg (const Out &out, const In in, int volume) : out(out), in(in), volumeCB(in.volumeCB) { }
};
/**
Generic CPU clover reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename Out, typename In>
void copyClover(CopyCloverArg<Out,In> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int x=0; x<arg.volumeCB; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, parity);
}
}
}
/**
Generic CUDA clover reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename Out, typename In>
__global__ void copyCloverKernel(CopyCloverArg<Out,In> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
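// one thread per checkerboard (even/odd) site; each thread copies the clover term for both parities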
for (int parity=0; parity<2; parity++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= arg.volumeCB) return;
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, parity);
}
}
template <typename FloatOut, typename FloatIn, int length, typename Out, typename In>
class CopyClover : Tunable {
CopyCloverArg<Out,In> arg;
const CloverField &meta;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.volumeCB; }
public:
CopyClover(CopyCloverArg<Out,In> &arg, const CloverField &meta) : arg(arg), meta(meta) {
writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride);
}
virtual ~CopyClover() { ; }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( copyCloverKernel<FloatOut, FloatIn, length, Out, In>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const { return 2*arg.volumeCB*(arg.in.Bytes() + arg.out.Bytes()); }
};
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyClover(OutOrder outOrder, const InOrder inOrder, const CloverField &out, QudaFieldLocation location) {
CopyCloverArg<OutOrder,InOrder> arg(outOrder, inOrder, out.Volume());
if (location == QUDA_CPU_FIELD_LOCATION) {
copyClover<FloatOut, FloatIn, length, OutOrder, InOrder>(arg);
} else if (location == QUDA_CUDA_FIELD_LOCATION) {
CopyClover<FloatOut, FloatIn, length, OutOrder, InOrder> cloverCopier(arg, out);
cloverCopier.apply(0);
} else {
errorQuda("Undefined field location %d for copyClover", location);
}
}
template <typename FloatOut, typename FloatIn, int length, typename InOrder>
void copyClover(const InOrder &inOrder, CloverField &out, bool inverse, QudaFieldLocation location, FloatOut *Out, float *outNorm) {
if (out.isNative()) {
typedef typename clover_mapper<FloatOut>::type C;
copyClover<FloatOut,FloatIn,length>(C(out, inverse, Out, outNorm), inOrder, out, location);
} else if (out.Order() == QUDA_PACKED_CLOVER_ORDER) {
copyClover<FloatOut,FloatIn,length>
(QDPOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location);
} else if (out.Order() == QUDA_QDPJIT_CLOVER_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyClover<FloatOut,FloatIn,length>
(QDPJITOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (out.Order() == QUDA_BQCD_CLOVER_ORDER) {
errorQuda("BQCD output not supported");
} else {
errorQuda("Clover field %d order not supported", out.Order());
}
}
template <typename FloatOut, typename FloatIn, int length>
void copyClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location,
FloatOut *Out, FloatIn *In, float *outNorm, float *inNorm) {
// reconstruction only supported on FloatN fields currently
if (in.isNative()) {
typedef typename clover_mapper<FloatIn>::type C;
copyClover<FloatOut,FloatIn,length>(C(in, inverse, In, inNorm), out, inverse, location, Out, outNorm);
} else if (in.Order() == QUDA_PACKED_CLOVER_ORDER) {
copyClover<FloatOut,FloatIn,length>
(QDPOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm);
} else if (in.Order() == QUDA_QDPJIT_CLOVER_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyClover<FloatOut,FloatIn,length>
(QDPJITOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (in.Order() == QUDA_BQCD_CLOVER_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyClover<FloatOut,FloatIn,length>
(BQCDOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else {
errorQuda("Clover field %d order not supported", in.Order());
}
}
#endif
// this is the function that is actually called, from here on down we instantiate all required templates
void copyGenericClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location,
void *Out, void *In, void *outNorm, void *inNorm) {
#ifdef GPU_CLOVER_DIRAC
if (out.Precision() == QUDA_HALF_PRECISION && out.Order() > 4)
errorQuda("Half precision not supported for order %d", out.Order());
if (in.Precision() == QUDA_HALF_PRECISION && in.Order() > 4)
errorQuda("Half precision not supported for order %d", in.Order());
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyClover<double,double,72>(out, in, inverse, location, (double*)Out, (double*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyClover<double,float,72>(out, in, inverse, location, (double*)Out, (float*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyClover<double,short,72>(out, in, inverse, location, (double*)Out, (short*)In, (float*)outNorm, (float*)inNorm);
}
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyClover<float,double,72>(out, in, inverse, location, (float*)Out, (double*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyClover<float,float,72>(out, in, inverse, location, (float*)Out, (float*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyClover<float,short,72>(out, in, inverse, location, (float*)Out, (short*)In, (float*)outNorm, (float*)inNorm);
}
} else if (out.Precision() == QUDA_HALF_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION){
copyClover<short,double,72>(out, in, inverse, location, (short*)Out, (double*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyClover<short,float,72>(out, in, inverse, location, (short*)Out, (float*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyClover<short,short,72>(out, in, inverse, location, (short*)Out, (short*)In, (float*)outNorm, (float*)inNorm);
}
}
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
| 9a5c275b72da12888a1204a7553563ad6601094a.cu | #include <clover_field_order.h>
#include <tune_quda.h>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
/**
Kernel argument struct
*/
template <typename Out, typename In>
struct CopyCloverArg {
Out out;
const In in;
int volumeCB;
CopyCloverArg (const Out &out, const In in, int volume) : out(out), in(in), volumeCB(in.volumeCB) { }
};
/**
Generic CPU clover reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename Out, typename In>
void copyClover(CopyCloverArg<Out,In> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int x=0; x<arg.volumeCB; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, parity);
}
}
}
/**
Generic CUDA clover reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename Out, typename In>
__global__ void copyCloverKernel(CopyCloverArg<Out,In> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= arg.volumeCB) return;
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, parity);
}
}
template <typename FloatOut, typename FloatIn, int length, typename Out, typename In>
class CopyClover : Tunable {
CopyCloverArg<Out,In> arg;
const CloverField &meta;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.volumeCB; }
public:
CopyClover(CopyCloverArg<Out,In> &arg, const CloverField &meta) : arg(arg), meta(meta) {
writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride);
}
virtual ~CopyClover() { ; }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
copyCloverKernel<FloatOut, FloatIn, length, Out, In>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const { return 2*arg.volumeCB*(arg.in.Bytes() + arg.out.Bytes()); }
};
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyClover(OutOrder outOrder, const InOrder inOrder, const CloverField &out, QudaFieldLocation location) {
CopyCloverArg<OutOrder,InOrder> arg(outOrder, inOrder, out.Volume());
if (location == QUDA_CPU_FIELD_LOCATION) {
copyClover<FloatOut, FloatIn, length, OutOrder, InOrder>(arg);
} else if (location == QUDA_CUDA_FIELD_LOCATION) {
CopyClover<FloatOut, FloatIn, length, OutOrder, InOrder> cloverCopier(arg, out);
cloverCopier.apply(0);
} else {
errorQuda("Undefined field location %d for copyClover", location);
}
}
template <typename FloatOut, typename FloatIn, int length, typename InOrder>
void copyClover(const InOrder &inOrder, CloverField &out, bool inverse, QudaFieldLocation location, FloatOut *Out, float *outNorm) {
if (out.isNative()) {
typedef typename clover_mapper<FloatOut>::type C;
copyClover<FloatOut,FloatIn,length>(C(out, inverse, Out, outNorm), inOrder, out, location);
} else if (out.Order() == QUDA_PACKED_CLOVER_ORDER) {
copyClover<FloatOut,FloatIn,length>
(QDPOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location);
} else if (out.Order() == QUDA_QDPJIT_CLOVER_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyClover<FloatOut,FloatIn,length>
(QDPJITOrder<FloatOut,length>(out, inverse, Out), inOrder, out, location);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (out.Order() == QUDA_BQCD_CLOVER_ORDER) {
errorQuda("BQCD output not supported");
} else {
errorQuda("Clover field %d order not supported", out.Order());
}
}
template <typename FloatOut, typename FloatIn, int length>
void copyClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location,
FloatOut *Out, FloatIn *In, float *outNorm, float *inNorm) {
// reconstruction only supported on FloatN fields currently
if (in.isNative()) {
typedef typename clover_mapper<FloatIn>::type C;
copyClover<FloatOut,FloatIn,length>(C(in, inverse, In, inNorm), out, inverse, location, Out, outNorm);
} else if (in.Order() == QUDA_PACKED_CLOVER_ORDER) {
copyClover<FloatOut,FloatIn,length>
(QDPOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm);
} else if (in.Order() == QUDA_QDPJIT_CLOVER_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyClover<FloatOut,FloatIn,length>
(QDPJITOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (in.Order() == QUDA_BQCD_CLOVER_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyClover<FloatOut,FloatIn,length>
(BQCDOrder<FloatIn,length>(in, inverse, In), out, inverse, location, Out, outNorm);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else {
errorQuda("Clover field %d order not supported", in.Order());
}
}
#endif
// this is the function that is actually called, from here on down we instantiate all required templates
void copyGenericClover(CloverField &out, const CloverField &in, bool inverse, QudaFieldLocation location,
void *Out, void *In, void *outNorm, void *inNorm) {
#ifdef GPU_CLOVER_DIRAC
if (out.Precision() == QUDA_HALF_PRECISION && out.Order() > 4)
errorQuda("Half precision not supported for order %d", out.Order());
if (in.Precision() == QUDA_HALF_PRECISION && in.Order() > 4)
errorQuda("Half precision not supported for order %d", in.Order());
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyClover<double,double,72>(out, in, inverse, location, (double*)Out, (double*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyClover<double,float,72>(out, in, inverse, location, (double*)Out, (float*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyClover<double,short,72>(out, in, inverse, location, (double*)Out, (short*)In, (float*)outNorm, (float*)inNorm);
}
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyClover<float,double,72>(out, in, inverse, location, (float*)Out, (double*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyClover<float,float,72>(out, in, inverse, location, (float*)Out, (float*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyClover<float,short,72>(out, in, inverse, location, (float*)Out, (short*)In, (float*)outNorm, (float*)inNorm);
}
} else if (out.Precision() == QUDA_HALF_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION){
copyClover<short,double,72>(out, in, inverse, location, (short*)Out, (double*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyClover<short,float,72>(out, in, inverse, location, (short*)Out, (float*)In, (float*)outNorm, (float*)inNorm);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyClover<short,short,72>(out, in, inverse, location, (short*)Out, (short*)In, (float*)outNorm, (float*)inNorm);
}
}
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
|
c5cc02bf915eb4b1728e73fc1b3d46c7ffee9624.hip | // !!! This is a file automatically generated by hipify!!!
#include "list.h"
#include "cuda_utils.h"
#include <assert.h>
#include <stdio.h>
list **lists_create(int lists, int capacity) {
list **lists_cpu = (list**)malloc(lists * sizeof(list*));
list **lists_gpu = NULL;
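// build the array of device list pointers on the host, then copy the pointer array itself to the GPU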
for (int i = 0; i < lists; i++) {
lists_cpu[i] = list_create(capacity);
}
HANDLE_RESULT(hipMalloc(&lists_gpu, lists * sizeof(list*)));
HANDLE_RESULT(hipMemcpy(lists_gpu, lists_cpu, lists * sizeof(list*), hipMemcpyDefault));
free(lists_cpu);
return lists_gpu;
}
list *list_create(int capacity) {
list list_cpu;
list *list_gpu;
list_cpu.length = 0;
list_cpu.capacity = capacity;
HANDLE_RESULT(hipMalloc(&(list_cpu.arr), (capacity + 1) * sizeof(state*)));
HANDLE_RESULT(hipMalloc(&list_gpu, sizeof(struct list)));
HANDLE_RESULT(hipMemcpy(list_gpu, &list_cpu, sizeof(struct list),
hipMemcpyDefault));
return list_gpu;
}
void lists_destroy(list **lists_gpu, int lists) {
list **lists_cpu = (list**)malloc(lists * sizeof(list*));
HANDLE_RESULT(hipMemcpy(lists_cpu, lists_gpu, lists * sizeof(list*), hipMemcpyDefault));
for (int i = 0; i < lists; i++) {
list_destroy(lists_cpu[i]);
}
HANDLE_RESULT(hipFree(lists_gpu));
free(lists_cpu);
}
void list_destroy(list *list_gpu) {
list list_cpu;
HANDLE_RESULT(hipMemcpy(&list_cpu, list_gpu, sizeof(struct list),
hipMemcpyDefault));
HANDLE_RESULT(hipFree(list_cpu.arr));
HANDLE_RESULT(hipFree(list_gpu));
}
__device__ void list_clear(list *list) {
list->length = 0;
}
__device__ void list_insert(list *list, state *state) {
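// lock-free append: atomicAdd on length reserves a unique slot for this thread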
int index = atomicAdd(&(list->length), 1);
assert(index < list->capacity);
list->arr[index] = state;
}
__device__ void list_remove(list *list, int index) {
assert(list->length < list->capacity);
list->arr[index] = NULL;
}
__device__ state *list_get(list *list, int index) {
assert(index < list->length);
return list->arr[index];
}
| c5cc02bf915eb4b1728e73fc1b3d46c7ffee9624.cu | #include "list.h"
#include "cuda_utils.h"
#include <assert.h>
#include <stdio.h>
list **lists_create(int lists, int capacity) {
list **lists_cpu = (list**)malloc(lists * sizeof(list*));
list **lists_gpu = NULL;
for (int i = 0; i < lists; i++) {
lists_cpu[i] = list_create(capacity);
}
HANDLE_RESULT(cudaMalloc(&lists_gpu, lists * sizeof(list*)));
HANDLE_RESULT(cudaMemcpy(lists_gpu, lists_cpu, lists * sizeof(list*), cudaMemcpyDefault));
free(lists_cpu);
return lists_gpu;
}
list *list_create(int capacity) {
list list_cpu;
list *list_gpu;
list_cpu.length = 0;
list_cpu.capacity = capacity;
HANDLE_RESULT(cudaMalloc(&(list_cpu.arr), (capacity + 1) * sizeof(state*)));
HANDLE_RESULT(cudaMalloc(&list_gpu, sizeof(struct list)));
HANDLE_RESULT(cudaMemcpy(list_gpu, &list_cpu, sizeof(struct list),
cudaMemcpyDefault));
return list_gpu;
}
void lists_destroy(list **lists_gpu, int lists) {
list **lists_cpu = (list**)malloc(lists * sizeof(list*));
HANDLE_RESULT(cudaMemcpy(lists_cpu, lists_gpu, lists * sizeof(list*), cudaMemcpyDefault));
for (int i = 0; i < lists; i++) {
list_destroy(lists_cpu[i]);
}
HANDLE_RESULT(cudaFree(lists_gpu));
free(lists_cpu);
}
void list_destroy(list *list_gpu) {
list list_cpu;
HANDLE_RESULT(cudaMemcpy(&list_cpu, list_gpu, sizeof(struct list),
cudaMemcpyDefault));
HANDLE_RESULT(cudaFree(list_cpu.arr));
HANDLE_RESULT(cudaFree(list_gpu));
}
__device__ void list_clear(list *list) {
list->length = 0;
}
__device__ void list_insert(list *list, state *state) {
int index = atomicAdd(&(list->length), 1);
assert(index < list->capacity);
list->arr[index] = state;
}
__device__ void list_remove(list *list, int index) {
assert(list->length < list->capacity);
list->arr[index] = NULL;
}
__device__ state *list_get(list *list, int index) {
assert(index < list->length);
return list->arr[index];
}
|
0ce8e9db55baa7929c02243726d5da00f30e5477.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "./cpu-config.h"
#include "./cuda-config.cuh"
using namespace std;
//#define GRIDSIZE 2
//#define BLOCKSIZE 1024
//#define TOTALSIZE (BLOCKSIZE*GRIDSIZE)
//
//const unsigned TargetSize = 2 * BLOCKSIZE;
int main(void) {
unsigned* pData;
long long start, end, freq;
pData = (unsigned*)malloc(TargetSize * sizeof(unsigned));
genData(pData, TargetSize);
//check
printf("sorting %d data\n", TargetSize);
printf("%u %u %u %u %u %u\n",
pData[0], pData[1], pData[2], pData[3], pData[4], pData[5]);
printf("is sorted? -- %s\n", isSortedData(pData, TargetSize) ? "yes" : "no");
//perform
QueryPerformanceFrequency((LARGE_INTEGER*)(&freq));
QueryPerformanceCounter((LARGE_INTEGER*)(&start));
bit_sort_iter(pData, TargetSize);
QueryPerformanceCounter((LARGE_INTEGER*)(&end));
//check
printf("elapsed time = %f msec\n", (double)(end - start) * 1000.0 / (double)(freq));
printf("sorting %d data\n", TargetSize);
printf("%u %u %u %u %u %u\n",
pData[0], pData[1], pData[2], pData[3], pData[4], pData[5]);
printf("is sorted? -- %s\n", isSortedData(pData, TargetSize) ? "yes" : "no");
//CUDA
long long s_c, e_c,f_c;
unsigned* pData_c;
pData_c = (unsigned*)malloc(TargetSize * sizeof(unsigned));
unsigned* pData_cDev=NULL;
genData(pData_c, TargetSize);
printf("\n==== CUDA ====\n");
printf("sorting %d data\n", TargetSize);
printf("%u %u %u %u %u %u\n",
pData_c[0], pData_c[1], pData_c[2], pData_c[3], pData_c[4], pData_c[5]);
printf("is sorted? -- %s\n", isSortedData(pData_c, TargetSize) ? "yes" : "no");
hipMalloc((void**)&pData_cDev, TOTALSIZE * sizeof(unsigned));
QueryPerformanceFrequency((LARGE_INTEGER*)(&f_c));
hipMemcpy(pData_cDev, pData_c, TOTALSIZE * sizeof(unsigned), hipMemcpyHostToDevice);
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(BLOCKSIZE, 1, 1);
QueryPerformanceCounter((LARGE_INTEGER*)(&s_c));
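//single block of BLOCKSIZE threads; the kernel (defined in cuda-config.cuh) is expected to sort the device buffer in place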
kernel << <dimGrid, dimBlock >> > (pData_cDev);
hipMemcpy(pData_c, pData_cDev, TOTALSIZE * sizeof(unsigned), hipMemcpyDeviceToHost);
QueryPerformanceCounter((LARGE_INTEGER*)(&e_c));
//print result
printf("elapsed time = %f msec\n", (double)(e_c - s_c) * 1000.0 / (double)(f_c));
//check
printf("%u %u %u %u %u %u\n",
pData_c[0], pData_c[1], pData_c[2], pData_c[3], pData_c[4], pData_c[5]);
printf("is sorted? -- %s\n", isSortedData(pData_c, TargetSize) ? "yes" : "no");
} | 0ce8e9db55baa7929c02243726d5da00f30e5477.cu | #include <iostream>
#include "./cpu-config.h"
#include "./cuda-config.cuh"
using namespace std;
//#define GRIDSIZE 2
//#define BLOCKSIZE 1024
//#define TOTALSIZE (BLOCKSIZE*GRIDSIZE)
//
//const unsigned TargetSize = 2 * BLOCKSIZE;
int main(void) {
unsigned* pData;
long long start, end, freq;
pData = (unsigned*)malloc(TargetSize * sizeof(unsigned));
genData(pData, TargetSize);
//check
printf("sorting %d data\n", TargetSize);
printf("%u %u %u %u %u %u\n",
pData[0], pData[1], pData[2], pData[3], pData[4], pData[5]);
printf("is sorted? -- %s\n", isSortedData(pData, TargetSize) ? "yes" : "no");
//perform
QueryPerformanceFrequency((LARGE_INTEGER*)(&freq));
QueryPerformanceCounter((LARGE_INTEGER*)(&start));
bit_sort_iter(pData, TargetSize);
QueryPerformanceCounter((LARGE_INTEGER*)(&end));
//check
printf("elapsed time = %f msec\n", (double)(end - start) * 1000.0 / (double)(freq));
printf("sorting %d data\n", TargetSize);
printf("%u %u %u %u %u %u\n",
pData[0], pData[1], pData[2], pData[3], pData[4], pData[5]);
printf("is sorted? -- %s\n", isSortedData(pData, TargetSize) ? "yes" : "no");
//CUDA
long long s_c, e_c,f_c;
unsigned* pData_c;
pData_c = (unsigned*)malloc(TargetSize * sizeof(unsigned));
unsigned* pData_cDev=NULL;
genData(pData_c, TargetSize);
printf("\n==== CUDA ====\n");
printf("sorting %d data\n", TargetSize);
printf("%u %u %u %u %u %u\n",
pData_c[0], pData_c[1], pData_c[2], pData_c[3], pData_c[4], pData_c[5]);
printf("is sorted? -- %s\n", isSortedData(pData_c, TargetSize) ? "yes" : "no");
cudaMalloc((void**)&pData_cDev, TOTALSIZE * sizeof(unsigned));
QueryPerformanceFrequency((LARGE_INTEGER*)(&f_c));
cudaMemcpy(pData_cDev, pData_c, TOTALSIZE * sizeof(unsigned), cudaMemcpyHostToDevice);
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(BLOCKSIZE, 1, 1);
QueryPerformanceCounter((LARGE_INTEGER*)(&s_c));
kernel << <dimGrid, dimBlock >> > (pData_cDev);
cudaMemcpy(pData_c, pData_cDev, TOTALSIZE * sizeof(unsigned), cudaMemcpyDeviceToHost);
QueryPerformanceCounter((LARGE_INTEGER*)(&e_c));
//print result
printf("elapsed time = %f msec\n", (double)(e_c - s_c) * 1000.0 / (double)(f_c));
//check
printf("%u %u %u %u %u %u\n",
pData_c[0], pData_c[1], pData_c[2], pData_c[3], pData_c[4], pData_c[5]);
printf("is sorted? -- %s\n", isSortedData(pData_c, TargetSize) ? "yes" : "no");
} |
21ab0c1b2185bad766e2c6b94084c21262249e1d.hip | // !!! This is a file automatically generated by hipify!!!
/* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
 * Finally the Output of RawToDigi data is given to pixelClusterizer
*
**/
// C++ includes
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
// CUDA includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// CMSSW includes
#include "CUDADataFormats/gpuClusteringConstants.h"
#include "CUDACore/cudaCheck.h"
#include "CUDACore/device_unique_ptr.h"
#include "CUDACore/host_unique_ptr.h"
#include "CondFormats/SiPixelFedCablingMapGPU.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
#include "gpuCalibPixel.h"
#include "gpuClusterChargeCut.h"
#include "gpuClustering.h"
namespace pixelgpudetails {
// number of words for all the FEDs
constexpr uint32_t MAX_FED_WORDS = pixelgpudetails::MAX_FED * pixelgpudetails::MAX_WORD;
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender() {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(MAX_FED_WORDS, hipHostMallocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(MAX_FED_WORDS, hipHostMallocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const uint32_t *src,
unsigned int length) {
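// pack the payload words and their FED ids; one fedId byte covers a pair of 32-bit words (read back as fedIds[gIndex / 2] in the kernel)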
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - 1200, length / 2);
}
////////////////////
__device__ uint32_t getLink(uint32_t ww) {
return ((ww >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask);
}
__device__ uint32_t getRoc(uint32_t ww) { return ((ww >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask); }
__device__ uint32_t getADC(uint32_t ww) { return ((ww >> pixelgpudetails::ADC_shift) & pixelgpudetails::ADC_mask); }
__device__ bool isBarrel(uint32_t rawId) { return (1 == ((rawId >> 25) & 0x7)); }
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelFedCablingMapGPU *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->RawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to global pixel
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
//printf("Inside frameConversion row: %u, column: %u\n", gRow, gCol);
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
// debug = true;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
uint32_t numRowsInRoc = 80;
uint32_t numColsInRoc = 52;
/// row and column in ROC representation
return ((rocRow < numRowsInRoc) & (rocCol < numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
__device__ uint8_t checkROC(
uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) {
uint8_t errorType = (errorWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> pixelgpudetails::OMIT_ERR_shift) & pixelgpudetails::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int StateMatch_bits = 4;
int StateMatch_shift = 8;
uint32_t StateMatch_mask = ~(~uint32_t(0) << StateMatch_bits);
int StateMatch = (errorWord >> StateMatch_shift) & StateMatch_mask;
if (StateMatch != 1 && StateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (StateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelFedCablingMapGPU *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
//set dummy values for cabling just to get detId from link
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 29: {
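// timeout error: decode which channel timed out from the five data bits and the block field of the error word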
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
// set dummy values for cabling just to get detId from link if in Barrel
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 37:
case 38: {
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = (errWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
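// Each thread walks the 32-bit FED words with a grid-stride loop: decode link/ROC,
// resolve the detId through the cabling map, convert the ROC-local (row, col) to
// module coordinates with frameConversion, and pack the result into pdigi.
// Words that fail the ROC/quality checks are recorded as PixelErrorCompact entries.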
__global__ void RawToDigi_kernel(const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<PixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many continue statements below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = 9999;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = getLink(ww); // Extract link
uint32_t roc = getRoc(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(PixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.RawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0; //, ladder =0;
int side = 0, panel = 0, module = 0; //disk = 0, blade = 0
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
//disk = (rawId >> diskStartBit_) & diskMask_;
side = (panel == 1) ? -1 : 1;
//blade = (rawId >> bladeStartBit_) & bladeMask_;
}
// ***special case of layer 1 to be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = (ww >> pixelgpudetails::COL_shift) & pixelgpudetails::COL_mask;
uint32_t row = (ww >> pixelgpudetails::ROW_shift) & pixelgpudetails::ROW_mask;
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
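// A double column (dcol) spans two pixel columns and pxid runs 2..161, hence
// row = numRowsInRoc - pxid/2 and col = 2*dcol + pxid%2.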
uint32_t dcol = (ww >> pixelgpudetails::DCOL_shift) & pixelgpudetails::DCOL_mask;
uint32_t pxid = (ww >> pixelgpudetails::PXID_shift) & pixelgpudetails::PXID_mask;
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
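// fillHitsModuleStart: turn the per-module cluster counts into cumulative offsets.
// blockPrefixScan handles at most 1024 entries per call, so the module array
// (asserted to be < 2048 entries) is scanned in two chunks and the second chunk is
// shifted by the running total at index 1024; the result is clamped to MaxNumClusters.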
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::MaxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to MaxHitsInModule;
for (int i = first, iend = gpuClustering::MaxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = ::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::MaxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = ::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::MaxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::MaxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
// avoid overflow
constexpr auto MAX_HITS = gpuClustering::MaxNumClusters;
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (moduleStart[i] > MAX_HITS)
moduleStart[i] = MAX_HITS;
}
}
// Interface to outside
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
PixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
bool useQualityInfo,
bool includeErrors,
bool debug,
hipStream_t stream) {
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << pixelgpudetails::MAX_FED_WORDS << std::endl;
#endif
digis_d = SiPixelDigisCUDA(pixelgpudetails::MAX_FED_WORDS, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(pixelgpudetails::MAX_FED_WORDS, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::MaxNumModules, stream);
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total number of words in each event to be transferred to the device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
hipMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), hipMemcpyDefault, stream));
cudaCheck(hipMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, hipMemcpyDefault, stream));
// Launch rawToDigi kernel
hipLaunchKernelGGL(( RawToDigi_kernel), dim3(blocks), dim3(threadsPerBlock), 0, stream,
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
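// Clusterizer pipeline: calibDigis (gain/pedestal calibration), countModules,
// findClus (per-module clustering), clusterChargeCut, then fillHitsModuleStart
// to expose the cumulative cluster offsets without an extra synchronization point.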
int threadsPerBlock = 256;
int blocks =
(::max(int(wordCounter), int(gpuClustering::MaxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( gpuCalibPixel::calibDigis), dim3(blocks), dim3(threadsPerBlock), 0, stream, isRun2,
digis_d.moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
hipLaunchKernelGGL(( countModules), dim3(blocks), dim3(threadsPerBlock), 0, stream,
digis_d.c_moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(hipGetLastError());
// read the number of modules into a data member, used by getProduct())
cudaCheck(hipMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), hipMemcpyDefault, stream));
threadsPerBlock = 256;
blocks = MaxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
hipLaunchKernelGGL(( findClus), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d.c_moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// apply charge cut
hipLaunchKernelGGL(( clusterChargeCut), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d.moduleInd(),
digis_d.c_adc(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.c_moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
hipLaunchKernelGGL(( fillHitsModuleStart), dim3(1), dim3(1024), 0, stream, clusters_d.c_clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(hipMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::MaxNumModules,
sizeof(uint32_t),
hipMemcpyDefault,
stream));
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
| 21ab0c1b2185bad766e2c6b94084c21262249e1d.cu | /* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
* Finaly the Output of RawToDigi data is given to pixelClusterizer
*
**/
// C++ includes
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
// CUDA includes
#include <cuda.h>
#include <cuda_runtime.h>
// CMSSW includes
#include "CUDADataFormats/gpuClusteringConstants.h"
#include "CUDACore/cudaCheck.h"
#include "CUDACore/device_unique_ptr.h"
#include "CUDACore/host_unique_ptr.h"
#include "CondFormats/SiPixelFedCablingMapGPU.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
#include "gpuCalibPixel.h"
#include "gpuClusterChargeCut.h"
#include "gpuClustering.h"
namespace pixelgpudetails {
// number of words for all the FEDs
constexpr uint32_t MAX_FED_WORDS = pixelgpudetails::MAX_FED * pixelgpudetails::MAX_WORD;
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender() {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(MAX_FED_WORDS, cudaHostAllocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(MAX_FED_WORDS, cudaHostAllocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - 1200, length / 2);
}
////////////////////
__device__ uint32_t getLink(uint32_t ww) {
return ((ww >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask);
}
__device__ uint32_t getRoc(uint32_t ww) { return ((ww >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask); }
__device__ uint32_t getADC(uint32_t ww) { return ((ww >> pixelgpudetails::ADC_shift) & pixelgpudetails::ADC_mask); }
__device__ bool isBarrel(uint32_t rawId) { return (1 == ((rawId >> 25) & 0x7)); }
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelFedCablingMapGPU *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->RawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
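// The slope/offset pair flips or keeps the ROC-local row/column depending on the
// detector side, on layer 1 vs. the other layers, and on whether the ROC index is
// in the first or second group of eight ROCs on the module.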
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
//printf("Inside frameConversion row: %u, column: %u\n", gRow, gCol);
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
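// conversionError: map the cabling-check status codes (1-4) onto the offline error
// types 35-38 (invalid channel, invalid ROC, invalid dcol/pixel, out-of-order readout).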
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
// debug = true;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
uint32_t numRowsInRoc = 80;
uint32_t numColsInRoc = 52;
/// row and column in ROC representation
return ((rocRow < numRowsInRoc) & (rocCol < numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
__device__ uint8_t checkROC(
uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) {
uint8_t errorType = (errorWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> pixelgpudetails::OMIT_ERR_shift) & pixelgpudetails::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int StateMatch_bits = 4;
int StateMatch_shift = 8;
uint32_t StateMatch_mask = ~(~uint32_t(0) << StateMatch_bits);
int StateMatch = (errorWord >> StateMatch_shift) & StateMatch_mask;
if (StateMatch != 1 && StateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (StateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelFedCablingMapGPU *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
//set dummy values for cabling just to get detId from link
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
// set dummy values for cabling just to get detId from link if in Barrel
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
case 37:
case 38: {
//cabling.dcol = 0;
//cabling.pxid = 2;
uint32_t roc = (errWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask;
uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId;
if (rID_temp != 9999)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
__global__ void RawToDigi_kernel(const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<PixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many continue statements below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = 9999;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = getLink(ww); // Extract link
uint32_t roc = getRoc(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(PixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.RawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0; //, ladder =0;
int side = 0, panel = 0, module = 0; //disk = 0, blade = 0
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
//disk = (rawId >> diskStartBit_) & diskMask_;
side = (panel == 1) ? -1 : 1;
//blade = (rawId >> bladeStartBit_) & bladeMask_;
}
// ***special case of layer 1 to be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = (ww >> pixelgpudetails::COL_shift) & pixelgpudetails::COL_mask;
uint32_t row = (ww >> pixelgpudetails::ROW_shift) & pixelgpudetails::ROW_mask;
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = (ww >> pixelgpudetails::DCOL_shift) & pixelgpudetails::DCOL_mask;
uint32_t pxid = (ww >> pixelgpudetails::PXID_shift) & pixelgpudetails::PXID_mask;
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(PixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::MaxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to MaxHitsInModule;
for (int i = first, iend = gpuClustering::MaxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = std::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::MaxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = std::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::MaxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::MaxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
// avoid overflow
constexpr auto MAX_HITS = gpuClustering::MaxNumClusters;
for (int i = first, iend = gpuClustering::MaxNumModules + 1; i < iend; i += blockDim.x) {
if (moduleStart[i] > MAX_HITS)
moduleStart[i] = MAX_HITS;
}
}
// Interface to outside
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelFedCablingMapGPU *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
PixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
bool useQualityInfo,
bool includeErrors,
bool debug,
cudaStream_t stream) {
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << pixelgpudetails::MAX_FED_WORDS << std::endl;
#endif
digis_d = SiPixelDigisCUDA(pixelgpudetails::MAX_FED_WORDS, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(pixelgpudetails::MAX_FED_WORDS, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::MaxNumModules, stream);
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total number of words in each event to be transferred to the device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
cudaMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), cudaMemcpyDefault, stream));
cudaCheck(cudaMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, cudaMemcpyDefault, stream));
// Launch rawToDigi kernel
RawToDigi_kernel<<<blocks, threadsPerBlock, 0, stream>>>(
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(std::max(int(wordCounter), int(gpuClustering::MaxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
gpuCalibPixel::calibDigis<<<blocks, threadsPerBlock, 0, stream>>>(isRun2,
digis_d.moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
countModules<<<blocks, threadsPerBlock, 0, stream>>>(
digis_d.c_moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(cudaGetLastError());
// read the number of modules into a data member, used by getProduct())
cudaCheck(cudaMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), cudaMemcpyDefault, stream));
threadsPerBlock = 256;
blocks = MaxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
findClus<<<blocks, threadsPerBlock, 0, stream>>>(digis_d.c_moduleInd(),
digis_d.c_xx(),
digis_d.c_yy(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// apply charge cut
clusterChargeCut<<<blocks, threadsPerBlock, 0, stream>>>(digis_d.moduleInd(),
digis_d.c_adc(),
clusters_d.c_moduleStart(),
clusters_d.clusInModule(),
clusters_d.c_moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
fillHitsModuleStart<<<1, 1024, 0, stream>>>(clusters_d.c_clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(cudaMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::MaxNumModules,
sizeof(uint32_t),
cudaMemcpyDefault,
stream));
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
|
c3f6041767c9ac481f3e76cd88e7f310f0a6bcb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "caffe/layers/ctc_loss_layer.hpp"
namespace std{
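// Local re-implementation of std::accumulate, presumably so this translation unit
// does not have to pull in <numeric>; note that adding definitions to namespace std
// is technically undefined behavior.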
template <class InputIterator, class T>
T accumulate(InputIterator first, InputIterator last, T init)
{
while (first != last) {
init = init + *first; // or: init=binary_op(init,*first) for the binary_op version
++first;
}
return init;
}
}
namespace caffe {
template <>
void CtcLossLayer<double>::Forward_gpu(
const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top) {
NOT_IMPLEMENTED;
}
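// Forward pass: flatten the label blob, query the required CTC workspace size,
// run compute_ctc_loss on the dedicated stream, and write the minibatch-averaged
// loss into top[0].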
template <typename Dtype>
void CtcLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
hipDeviceSynchronize();
auto options = ctcOptions{};
options.loc = CTC_GPU;
CUDA_CHECK(hipStreamCreate(&(options.stream)));
options.blank_label = blank_label_;
int mini_batch = bottom[0]->shape()[1];
int alphabet_size = alphabet_size_;
const Dtype* const activations = bottom[0]->gpu_data();
Dtype* gradients = bottom[0]->mutable_gpu_diff();
CHECK(gradients != NULL) << "Oops, gradients is null";
FlattenLabels(bottom[1]);
size_t size_bytes;
CHECK_CTC_STATUS(get_workspace_size(label_lengths_.data(),
input_lengths_.data(), alphabet_size,
mini_batch, options, &size_bytes));
void* workspace;
CUDA_CHECK(hipMalloc(&workspace, size_bytes));
vector<Dtype> cost(mini_batch);
CHECK_CTC_STATUS(compute_ctc_loss(activations, gradients,
flat_labels_.data(),
label_lengths_.data(), input_lengths_.data(),
alphabet_size, mini_batch, cost.data(),
workspace, options));
Dtype loss = std::accumulate(cost.begin(), cost.end(), Dtype(0));
top[0]->mutable_cpu_data()[0] = loss / mini_batch;
CUDA_CHECK(hipFree(workspace));
CUDA_CHECK(hipStreamDestroy(options.stream));
CUDA_POST_KERNEL_CHECK;
}
template <>
void CtcLossLayer<double>::Backward_gpu(const vector<Blob<double>*>& top,
const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom) {
NOT_IMPLEMENTED;
}
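// Backward pass: compute_ctc_loss already left dL/dactivations in bottom[0]'s diff
// during Forward_gpu, so here we only rescale it by the loss weight top[0]->cpu_diff()[0].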
template <typename Dtype>
void CtcLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if(propagate_down[0]) {
hipDeviceSynchronize();
caffe_gpu_scal(bottom[0]->count(), top[0]->cpu_diff()[0],
bottom[0]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CtcLossLayer);
}
| c3f6041767c9ac481f3e76cd88e7f310f0a6bcb1.cu | #include "caffe/layers/ctc_loss_layer.hpp"
namespace std{
template <class InputIterator, class T>
T accumulate(InputIterator first, InputIterator last, T init)
{
while (first != last) {
init = init + *first; // or: init=binary_op(init,*first) for the binary_op version
++first;
}
return init;
}
}
namespace caffe {
template <>
void CtcLossLayer<double>::Forward_gpu(
const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top) {
NOT_IMPLEMENTED;
}
template <typename Dtype>
void CtcLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
cudaDeviceSynchronize();
auto options = ctcOptions{};
options.loc = CTC_GPU;
CUDA_CHECK(cudaStreamCreate(&(options.stream)));
options.blank_label = blank_label_;
int mini_batch = bottom[0]->shape()[1];
int alphabet_size = alphabet_size_;
const Dtype* const activations = bottom[0]->gpu_data();
Dtype* gradients = bottom[0]->mutable_gpu_diff();
CHECK(gradients != NULL) << "Oops, gradients is null";
FlattenLabels(bottom[1]);
size_t size_bytes;
CHECK_CTC_STATUS(get_workspace_size(label_lengths_.data(),
input_lengths_.data(), alphabet_size,
mini_batch, options, &size_bytes));
void* workspace;
CUDA_CHECK(cudaMalloc(&workspace, size_bytes));
vector<Dtype> cost(mini_batch);
CHECK_CTC_STATUS(compute_ctc_loss(activations, gradients,
flat_labels_.data(),
label_lengths_.data(), input_lengths_.data(),
alphabet_size, mini_batch, cost.data(),
workspace, options));
Dtype loss = std::accumulate(cost.begin(), cost.end(), Dtype(0));
top[0]->mutable_cpu_data()[0] = loss / mini_batch;
CUDA_CHECK(cudaFree(workspace));
CUDA_CHECK(cudaStreamDestroy(options.stream));
CUDA_POST_KERNEL_CHECK;
}
template <>
void CtcLossLayer<double>::Backward_gpu(const vector<Blob<double>*>& top,
const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom) {
NOT_IMPLEMENTED;
}
template <typename Dtype>
void CtcLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if(propagate_down[0]) {
cudaDeviceSynchronize();
caffe_gpu_scal(bottom[0]->count(), top[0]->cpu_diff()[0],
bottom[0]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CtcLossLayer);
}
|
e9d67d31b52b21264fdd60a8c8db8fac75dc628a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
* Copyright (c) 2013, Georgia Institute of Technology
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**********************************************
* @file costs.cu
* @author Grady Williams <[email protected]>
* @date May 24, 2017
* @copyright 2017 Georgia Institute of Technology
* @brief MPPICosts class implementation
***********************************************/
#include "gpu_err_chk.h"
#include "debug_kernels.cuh"
#include <stdio.h>
#include <stdlib.h>
namespace drive_control
{
inline MPPICosts::MPPICosts(int width, int height)
{
width_ = width;
height_ = height;
allocateTexMem();
//Initialize memory for device cost param struct
HANDLE_ERROR(hipMalloc((void **)¶ms_d_, sizeof(CostParams)));
debugging_ = false;
initCostmap();
}
inline MPPICosts::MPPICosts(ros::NodeHandle nh)
{
//Transform from world coordinates to normalized grid coordinates
Eigen::Matrix3f R;
Eigen::Array3f trs;
HANDLE_ERROR(hipMalloc((void **)¶ms_d_, sizeof(CostParams))); //Initialize memory for device cost param struct
//Get the map path
std::string map_path = getRosParam<std::string>("map_path", nh);
track_costs_ = loadTrackData(map_path, R, trs); //R and trs passed by reference
updateTransform(R, trs);
updateParams(nh);
allocateTexMem();
costmapToTexture();
debugging_ = false;
}
inline void MPPICosts::allocateTexMem()
{
//Allocate memory for the cuda array which is bound to costmap_tex_
channelDesc_ = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
HANDLE_ERROR(hipMallocArray(&costmapArray_d_, &channelDesc_, width_, height_));
}
inline void MPPICosts::updateParams_dcfg(drive_control::PathIntegralParamsConfig config)
{
params_.desired_speed = (float)config.desired_speed;
params_.speed_coeff = (float)config.speed_coefficient;
params_.track_coeff = (float)config.track_coefficient;
params_.max_slip_ang = (float)config.max_slip_angle;
params_.slip_penalty = (float)config.slip_penalty;
params_.crash_coeff = (float)config.crash_coefficient;
params_.track_slop = (float)config.track_slop;
params_.steering_coeff = (float)config.steering_coeff;
params_.throttle_coeff = (float)config.throttle_coeff;
paramsToDevice();
}
inline void MPPICosts::initCostmap()
{
track_costs_ = std::vector<float4>(width_ * height_);
//Initialize costmap to zeros
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].x = 0;
track_costs_[i].y = 0;
track_costs_[i].z = 0;
track_costs_[i].w = 0;
}
}
inline void MPPICosts::costmapToTexture(float *costmap, int channel)
{
switch (channel)
{
case 0:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].x = costmap[i];
}
break;
case 1:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].y = costmap[i];
}
break;
case 2:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].z = costmap[i];
}
break;
case 3:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].w = costmap[i];
}
break;
}
costmapToTexture();
}
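// Copy the float4 costmap into the CUDA array backing costmapArray_d_, then destroy
// and recreate costmap_tex_ as a texture object with clamped addressing, point
// filtering and normalized coordinates for the world -> [0,1] lookup.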
inline void MPPICosts::costmapToTexture()
{
//costmap_ = costmap;
//Transfer CPU mem to GPU
float4 *costmap_ptr = track_costs_.data();
HANDLE_ERROR(hipMemcpyToArray(costmapArray_d_, 0, 0, costmap_ptr, width_ * height_ * sizeof(float4), hipMemcpyHostToDevice));
hipStreamSynchronize(stream_);
//Specify texture
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = costmapArray_d_;
//Specify texture object parameters
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = 1;
//Destroy current texture and create new texture object
HANDLE_ERROR(hipDestroyTextureObject(costmap_tex_));
HANDLE_ERROR(hipCreateTextureObject(&costmap_tex_, &resDesc, &texDesc, NULL));
}
inline void MPPICosts::updateParams(ros::NodeHandle nh)
{
//Transfer to the cost params struct
l1_cost_ = getRosParam<bool>("l1_cost", nh);
params_.desired_speed = getRosParam<double>("desired_speed", nh);
params_.speed_coeff = getRosParam<double>("speed_coefficient", nh);
params_.track_coeff = getRosParam<double>("track_coefficient", nh);
params_.max_slip_ang = getRosParam<double>("max_slip_angle", nh);
params_.slip_penalty = getRosParam<double>("slip_penalty", nh);
params_.track_slop = getRosParam<double>("track_slop", nh);
params_.crash_coeff = getRosParam<double>("crash_coeff", nh);
params_.steering_coeff = getRosParam<double>("steering_coeff", nh);
params_.throttle_coeff = getRosParam<double>("throttle_coeff", nh);
params_.boundary_threshold = getRosParam<double>("boundary_threshold", nh);
params_.discount = getRosParam<double>("discount", nh);
params_.num_timesteps = getRosParam<int>("num_timesteps", nh);
//Move the updated parameters to gpu memory
paramsToDevice();
}
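// Cache the first two columns (r_c1, r_c2) and the translation column (trs) of the
// 3x3 projective map that takes world (x, y, 1) into normalized costmap texture
// coordinates; the device-side coorTransform applies exactly this product.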
inline void MPPICosts::updateTransform(Eigen::MatrixXf m, Eigen::ArrayXf trs)
{
params_.r_c1.x = m(0, 0);
params_.r_c1.y = m(1, 0);
params_.r_c1.z = m(2, 0);
params_.r_c2.x = m(0, 1);
params_.r_c2.y = m(1, 1);
params_.r_c2.z = m(2, 1);
params_.trs.x = trs(0);
params_.trs.y = trs(1);
params_.trs.z = trs(2);
//Move the updated parameters to gpu memory
paramsToDevice();
}
inline std::vector<float4> MPPICosts::loadTrackData(std::string map_path, Eigen::Matrix3f &R, Eigen::Array3f &trs)
{
if (!fileExists(map_path))
{
ROS_FATAL("Could not load costmap at path: %s", map_path.c_str());
}
cnpy::npz_t map_dict = cnpy::npz_load(map_path);
float x_min, x_max, y_min, y_max, ppm;
float *xBounds = map_dict["xBounds"].data<float>();
float *yBounds = map_dict["yBounds"].data<float>();
float *pixelsPerMeter = map_dict["pixelsPerMeter"].data<float>();
x_min = xBounds[0];
x_max = xBounds[1];
y_min = yBounds[0];
y_max = yBounds[1];
ppm = pixelsPerMeter[0];
width_ = int((x_max - x_min) * ppm);
height_ = int((y_max - y_min) * ppm);
initCostmap();
std::vector<float4> track_costs(width_ * height_);
float *channel0 = map_dict["channel0"].data<float>();
float *channel1 = map_dict["channel1"].data<float>();
float *channel2 = map_dict["channel2"].data<float>();
float *channel3 = map_dict["channel3"].data<float>();
for (int i = 0; i < width_ * height_; i++)
{
track_costs[i].x = channel0[i];
track_costs[i].y = channel1[i];
track_costs[i].z = channel2[i];
track_costs[i].w = channel3[i];
}
//Save the scaling and offset
R << 1. / (x_max - x_min), 0, 0,
0, 1. / (y_max - y_min), 0,
0, 0, 1;
trs << -x_min / (x_max - x_min), -y_min / (y_max - y_min), 1;
return track_costs;
}
inline void MPPICosts::paramsToDevice()
{
HANDLE_ERROR(hipMemcpy(params_d_, ¶ms_, sizeof(CostParams), hipMemcpyHostToDevice));
HANDLE_ERROR(hipStreamSynchronize(stream_));
}
inline void MPPICosts::getCostInfo()
{
}
inline float MPPICosts::getDesiredSpeed()
{
return params_.desired_speed;
}
inline void MPPICosts::setDesiredSpeed(float desired_speed)
{
params_.desired_speed = desired_speed;
paramsToDevice();
}
inline void MPPICosts::debugDisplayInit()
{
debugDisplayInit(10, 10, 50);
}
inline void MPPICosts::debugDisplayInit(int width_m, int height_m, int ppm)
{
debug_img_width_ = width_m;
debug_img_height_ = height_m;
debug_img_ppm_ = ppm;
debug_img_size_ = (width_m * ppm) * (height_m * ppm);
debug_data_ = new float[debug_img_size_];
debugging_ = true;
HANDLE_ERROR(hipMalloc((void **)&debug_data_d_, debug_img_size_ * sizeof(float)));
}
inline cv::Mat MPPICosts::getDebugDisplay(float x, float y, float heading)
{
cv::Mat debug_img; ///< OpenCV matrix for display debug info.
if (!debugging_)
{
debugDisplayInit();
}
launchDebugCostKernel(x, y, heading, debug_img_width_, debug_img_height_, debug_img_ppm_,
costmap_tex_, debug_data_d_, params_.r_c1, params_.r_c2, params_.trs, stream_);
//Now we just have to display debug_data_d_
HANDLE_ERROR(hipMemcpy(debug_data_, debug_data_d_, debug_img_size_ * sizeof(float), hipMemcpyDeviceToHost));
hipStreamSynchronize(stream_);
debug_img = cv::Mat(debug_img_width_ * debug_img_ppm_, debug_img_height_ * debug_img_ppm_, CV_32F, debug_data_);
return debug_img;
}
inline void MPPICosts::freeCudaMem()
{
HANDLE_ERROR(hipDestroyTextureObject(costmap_tex_));
HANDLE_ERROR(hipFreeArray(costmapArray_d_));
HANDLE_ERROR(hipFree(params_d_));
if (debugging_)
{
HANDLE_ERROR(hipFree(debug_data_d_));
}
}
// inline void MPPICosts::setTrackPoints(std::vector<nav_msgs::Odometry> trackPoints){
// for(int i = 0; i < trackPoints.size();i++){
// track_points_x_.push_back(trackPoints[i].pose.pose.position.x);
// track_points_y_.push_back(trackPoints[i].pose.pose.position.y);
// }
// track_point_size_ = trackPoints.size();
// }
inline void MPPICosts::updateCostmap(std::vector<int> description, std::vector<float> data) {}
inline void MPPICosts::updateObstacles(std::vector<int> description, std::vector<float> data) {}
inline __host__ __device__ void MPPICosts::getCrash(float *state, int *crash)
{
if (fabs(state[3]) > 1.57)
{
crash[0] = 1;
}
}
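// MPPI-style control cost: cross term between the injected noise du and the nominal
// control (u - du), weighted per channel by the steering/throttle coefficients and
// the inverse sampling variances.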
inline __host__ __device__ float MPPICosts::getControlCost(float *u, float *du, float *vars)
{
float control_cost = 0;
control_cost += params_d_->steering_coeff * du[0] * (u[0] - du[0]) / (vars[0] * vars[0]);
control_cost += params_d_->throttle_coeff * du[1] * (u[1] - du[1]) / (vars[1] * vars[1]);
return control_cost;
}
inline __host__ __device__ float MPPICosts::getSpeedCost(float *s, int *crash)
{
float cost = 0;
float error = s[4] - params_d_->desired_speed;
if (l1_cost_)
{
cost = fabs(error);
}
else
{
cost = error * error;
}
return (params_d_->speed_coeff * cost);
}
inline __host__ __device__ float MPPICosts::getCrashCost(float *s, int *crash, int timestep)
{
float crash_cost = 0;
if (crash[0] > 0)
{
crash_cost = params_d_->crash_coeff;
}
return crash_cost;
}
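// Penalize the slip angle -atan(s[5] / |s[4]|) quadratically via slip_penalty, and
// add the crash coefficient when the slip exceeds max_slip_ang.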
inline __host__ __device__ float MPPICosts::getStabilizingCost(float *s)
{
float stabilizing_cost = 0;
if (fabs(s[4]) > 0.001)
{
float slip = -atan(s[5] / fabs(s[4]));
stabilizing_cost = params_d_->slip_penalty * powf(slip, 2);
if (fabs(-atan(s[5] / fabs(s[4]))) > params_d_->max_slip_ang)
{
//If the slip angle is above the max slip angle kill the trajectory.
stabilizing_cost += params_d_->crash_coeff;
}
}
return stabilizing_cost;
}
inline __host__ __device__ void MPPICosts::coorTransform(float x, float y, float *u, float *v, float *w)
{
//Compute a projective transform of (x, y, 0, 1)
u[0] = params_d_->r_c1.x * x + params_d_->r_c2.x * y + params_d_->trs.x;
v[0] = params_d_->r_c1.y * x + params_d_->r_c2.y * y + params_d_->trs.y;
w[0] = params_d_->r_c1.z * x + params_d_->r_c2.z * y + params_d_->trs.z;
}
inline __device__ float MPPICosts::getTrackCost(float *s, int *crash, float* traj_path, int num_traj_points)
// inline __device__ float MPPICosts::getTrackCost(float* s, int* crash)
{
float track_cost = 0;
// Brute-force search: find the trajectory point with the smallest L1 distance to the current state s.
float minDistance = 100000;
int minIndex = -1;
// printf("traj_path[%d].x=%lf, traj_path[%d].y=%lf\n", 1648, traj_path[1648*3], 1648,traj_path[1648*3 + 1]);
if (num_traj_points==0)
{
printf("trrack empty!!!\n");
return 9999999;
}
for(int i = 0; i < num_traj_points; i++){
float dis = fabs(traj_path[i*3] - s[0]) + fabs(traj_path[i*3+1] - s[1]);
if(minDistance > dis){
minDistance = dis;
minIndex = i;
}
}
// printf("before return\n");
return 0;
// // printf("before print\n");
// // printf("%lf, %lf \n", s[0], s[1]);
// // nav_msgs::Path traj_path = track_points_;
// // printf("before track\n");
// // printf("path[0].x=%lf, path[0].y=%lf\n", track_points_.poses[0].pose.position.x, track_points_.poses[0].pose.position.y);
// // printf("track_size=%d\n", track_points_.poses.size());
// for (int i = 0; i < track_points_.poses.size(); i++)
// {
// float dis = fabs(track_points_.poses[i].pose.position.x - s[0]) + fabs(track_points_.poses[i].pose.position.y - s[1]);
// // float dis = fabs(track_points_x_[i] - s[0]) + fabs(track_points_y_[i] - s[1]);
// if (minDistance > dis)
// {
// minDistance = dis;
// minIndex = i;
// }
// }
// printf("mark1\n");
// for(int i = 0; i < track_points_.size(); i++){
// float dis = fabs(track_points_[i].pose.pose.position.x - s[0]) + fabs(track_points_[i].pose.pose.position.y - s[1]);
// // float dis = fabs(track_points_x_[i] - s[0]) + fabs(track_points_y_[i] - s[1]);
// if(minDistance > dis){
// minDistance = dis;
// minIndex = i;
// }
// }
//todo
track_cost = minDistance;
if (fabs(track_cost) < params_d_->track_slop)
{
track_cost = 0;
}
else
{
track_cost = track_cost * params_d_->track_coeff;
}
printf("mark2\n");
if (track_cost >= params_d_->boundary_threshold)
{
crash[0] = 1;
}
printf("track_cpst= %lf\n", track_cost);
return track_cost;
/////////////////////////////////////
// float track_cost = 0;
// //Compute a transformation to get the (x,y) positions of the front and back of the car.
// float x_front = s[0] + FRONT_D*__cosf(s[2]);
// float y_front = s[1] + FRONT_D*__sinf(s[2]);
// float x_back = s[0] + BACK_D*__cosf(s[2]);
// float y_back = s[1] + BACK_D*__sinf(s[2]);
// float u,v,w; //Transformed coordinates
// //Cost of front of the car
// coorTransform(x_front, y_front, &u, &v, &w);
// float4 track_params_front = tex2D<float4>(costmap_tex_, u/w, v/w);
// //Cost for back of the car
// coorTransform(x_back, y_back, &u, &v, &w);
// float4 track_params_back = tex2D<float4>(costmap_tex_, u/w, v/w);
// float track_cost_front = track_params_front.x;
// float track_cost_back = track_params_back.x;
// track_cost = (fabs(track_cost_front) + fabs(track_cost_back) )/2.0;
// if (fabs(track_cost) < params_d_->track_slop) {
// track_cost = 0;
// }
// else {
// track_cost = params_d_->track_coeff*track_cost;
// }
// if (track_cost_front >= params_d_->boundary_threshold || track_cost_back >= params_d_->boundary_threshold) {
// crash[0] = 1;
// }
// return track_cost;
}
//Compute the immediate running cost.
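// Sum of the control, track, speed, stabilizing and (1 - discount)-weighted crash
// terms, clamped to 1e12 and guarded against NaN.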
inline __device__ float MPPICosts::computeCost(float *s, float *u, float *du,
float *vars, int *crash, int timestep, float* traj_path, int num_traj_points)
{
float control_cost = getControlCost(u, du, vars);
float track_cost = getTrackCost(s, crash, traj_path, num_traj_points);
float speed_cost = getSpeedCost(s, crash);
float crash_cost = (1.0 - params_.discount) * getCrashCost(s, crash, timestep);
float stabilizing_cost = getStabilizingCost(s);
float cost = control_cost + speed_cost + crash_cost + track_cost + stabilizing_cost;
if (cost > 1e12 || isnan(cost))
{
cost = 1e12;
}
return cost;
}
inline __device__ float MPPICosts::terminalCost(float *s)
{
return 0.0;
}
} // namespace drive_control
| e9d67d31b52b21264fdd60a8c8db8fac75dc628a.cu | /*
* Software License Agreement (BSD License)
* Copyright (c) 2013, Georgia Institute of Technology
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**********************************************
* @file costs.cu
* @author Grady Williams <[email protected]>
* @date May 24, 2017
* @copyright 2017 Georgia Institute of Technology
* @brief MPPICosts class implementation
***********************************************/
#include "gpu_err_chk.h"
#include "debug_kernels.cuh"
#include <stdio.h>
#include <stdlib.h>
namespace drive_control
{
inline MPPICosts::MPPICosts(int width, int height)
{
width_ = width;
height_ = height;
allocateTexMem();
//Initialize memory for device cost param struct
HANDLE_ERROR(cudaMalloc((void **)¶ms_d_, sizeof(CostParams)));
debugging_ = false;
initCostmap();
}
inline MPPICosts::MPPICosts(ros::NodeHandle nh)
{
//Transform from world coordinates to normalized grid coordinates
Eigen::Matrix3f R;
Eigen::Array3f trs;
HANDLE_ERROR(cudaMalloc((void **)¶ms_d_, sizeof(CostParams))); //Initialize memory for device cost param struct
//Get the map path
std::string map_path = getRosParam<std::string>("map_path", nh);
track_costs_ = loadTrackData(map_path, R, trs); //R and trs passed by reference
updateTransform(R, trs);
updateParams(nh);
allocateTexMem();
costmapToTexture();
debugging_ = false;
}
inline void MPPICosts::allocateTexMem()
{
//Allocate memory for the cuda array which is bound to costmap_tex_
channelDesc_ = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
HANDLE_ERROR(cudaMallocArray(&costmapArray_d_, &channelDesc_, width_, height_));
}
inline void MPPICosts::updateParams_dcfg(drive_control::PathIntegralParamsConfig config)
{
params_.desired_speed = (float)config.desired_speed;
params_.speed_coeff = (float)config.speed_coefficient;
params_.track_coeff = (float)config.track_coefficient;
params_.max_slip_ang = (float)config.max_slip_angle;
params_.slip_penalty = (float)config.slip_penalty;
params_.crash_coeff = (float)config.crash_coefficient;
params_.track_slop = (float)config.track_slop;
params_.steering_coeff = (float)config.steering_coeff;
params_.throttle_coeff = (float)config.throttle_coeff;
paramsToDevice();
}
inline void MPPICosts::initCostmap()
{
track_costs_ = std::vector<float4>(width_ * height_);
//Initialize costmap to zeros
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].x = 0;
track_costs_[i].y = 0;
track_costs_[i].z = 0;
track_costs_[i].w = 0;
}
}
inline void MPPICosts::costmapToTexture(float *costmap, int channel)
{
switch (channel)
{
case 0:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].x = costmap[i];
}
break;
case 1:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].y = costmap[i];
}
break;
case 2:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].z = costmap[i];
}
break;
case 3:
for (int i = 0; i < width_ * height_; i++)
{
track_costs_[i].w = costmap[i];
}
break;
}
costmapToTexture();
}
inline void MPPICosts::costmapToTexture()
{
//costmap_ = costmap;
//Transfer CPU mem to GPU
float4 *costmap_ptr = track_costs_.data();
HANDLE_ERROR(cudaMemcpyToArray(costmapArray_d_, 0, 0, costmap_ptr, width_ * height_ * sizeof(float4), cudaMemcpyHostToDevice));
cudaStreamSynchronize(stream_);
//Specify texture
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = costmapArray_d_;
//Specify texture object parameters
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
//Destroy current texture and create new texture object
HANDLE_ERROR(cudaDestroyTextureObject(costmap_tex_));
HANDLE_ERROR(cudaCreateTextureObject(&costmap_tex_, &resDesc, &texDesc, NULL));
}
inline void MPPICosts::updateParams(ros::NodeHandle nh)
{
//Transfer to the cost params struct
l1_cost_ = getRosParam<bool>("l1_cost", nh);
params_.desired_speed = getRosParam<double>("desired_speed", nh);
params_.speed_coeff = getRosParam<double>("speed_coefficient", nh);
params_.track_coeff = getRosParam<double>("track_coefficient", nh);
params_.max_slip_ang = getRosParam<double>("max_slip_angle", nh);
params_.slip_penalty = getRosParam<double>("slip_penalty", nh);
params_.track_slop = getRosParam<double>("track_slop", nh);
params_.crash_coeff = getRosParam<double>("crash_coeff", nh);
params_.steering_coeff = getRosParam<double>("steering_coeff", nh);
params_.throttle_coeff = getRosParam<double>("throttle_coeff", nh);
params_.boundary_threshold = getRosParam<double>("boundary_threshold", nh);
params_.discount = getRosParam<double>("discount", nh);
params_.num_timesteps = getRosParam<int>("num_timesteps", nh);
//Move the updated parameters to gpu memory
paramsToDevice();
}
inline void MPPICosts::updateTransform(Eigen::MatrixXf m, Eigen::ArrayXf trs)
{
params_.r_c1.x = m(0, 0);
params_.r_c1.y = m(1, 0);
params_.r_c1.z = m(2, 0);
params_.r_c2.x = m(0, 1);
params_.r_c2.y = m(1, 1);
params_.r_c2.z = m(2, 1);
params_.trs.x = trs(0);
params_.trs.y = trs(1);
params_.trs.z = trs(2);
//Move the updated parameters to gpu memory
paramsToDevice();
}
inline std::vector<float4> MPPICosts::loadTrackData(std::string map_path, Eigen::Matrix3f &R, Eigen::Array3f &trs)
{
if (!fileExists(map_path))
{
ROS_FATAL("Could not load costmap at path: %s", map_path.c_str());
}
cnpy::npz_t map_dict = cnpy::npz_load(map_path);
float x_min, x_max, y_min, y_max, ppm;
float *xBounds = map_dict["xBounds"].data<float>();
float *yBounds = map_dict["yBounds"].data<float>();
float *pixelsPerMeter = map_dict["pixelsPerMeter"].data<float>();
x_min = xBounds[0];
x_max = xBounds[1];
y_min = yBounds[0];
y_max = yBounds[1];
ppm = pixelsPerMeter[0];
width_ = int((x_max - x_min) * ppm);
height_ = int((y_max - y_min) * ppm);
initCostmap();
std::vector<float4> track_costs(width_ * height_);
float *channel0 = map_dict["channel0"].data<float>();
float *channel1 = map_dict["channel1"].data<float>();
float *channel2 = map_dict["channel2"].data<float>();
float *channel3 = map_dict["channel3"].data<float>();
for (int i = 0; i < width_ * height_; i++)
{
track_costs[i].x = channel0[i];
track_costs[i].y = channel1[i];
track_costs[i].z = channel2[i];
track_costs[i].w = channel3[i];
}
//Save the scaling and offset
R << 1. / (x_max - x_min), 0, 0,
0, 1. / (y_max - y_min), 0,
0, 0, 1;
trs << -x_min / (x_max - x_min), -y_min / (y_max - y_min), 1;
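    // R scales world (x, y) onto [0, 1] across the map extent and trs shifts the origin;
    // coorTransform() applies them as a projective transform, and since the costmap texture
    // is created with normalizedCoords = 1, u/w and v/w index directly into the map.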
return track_costs;
}
inline void MPPICosts::paramsToDevice()
{
HANDLE_ERROR(cudaMemcpy(params_d_, ¶ms_, sizeof(CostParams), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaStreamSynchronize(stream_));
}
inline void MPPICosts::getCostInfo()
{
}
inline float MPPICosts::getDesiredSpeed()
{
return params_.desired_speed;
}
inline void MPPICosts::setDesiredSpeed(float desired_speed)
{
params_.desired_speed = desired_speed;
paramsToDevice();
}
inline void MPPICosts::debugDisplayInit()
{
debugDisplayInit(10, 10, 50);
}
inline void MPPICosts::debugDisplayInit(int width_m, int height_m, int ppm)
{
debug_img_width_ = width_m;
debug_img_height_ = height_m;
debug_img_ppm_ = ppm;
debug_img_size_ = (width_m * ppm) * (height_m * ppm);
debug_data_ = new float[debug_img_size_];
debugging_ = true;
HANDLE_ERROR(cudaMalloc((void **)&debug_data_d_, debug_img_size_ * sizeof(float)));
}
inline cv::Mat MPPICosts::getDebugDisplay(float x, float y, float heading)
{
    cv::Mat debug_img; ///< OpenCV matrix for displaying debug info.
if (!debugging_)
{
debugDisplayInit();
}
launchDebugCostKernel(x, y, heading, debug_img_width_, debug_img_height_, debug_img_ppm_,
costmap_tex_, debug_data_d_, params_.r_c1, params_.r_c2, params_.trs, stream_);
//Now we just have to display debug_data_d_
HANDLE_ERROR(cudaMemcpy(debug_data_, debug_data_d_, debug_img_size_ * sizeof(float), cudaMemcpyDeviceToHost));
cudaStreamSynchronize(stream_);
debug_img = cv::Mat(debug_img_width_ * debug_img_ppm_, debug_img_height_ * debug_img_ppm_, CV_32F, debug_data_);
return debug_img;
}
inline void MPPICosts::freeCudaMem()
{
HANDLE_ERROR(cudaDestroyTextureObject(costmap_tex_));
HANDLE_ERROR(cudaFreeArray(costmapArray_d_));
HANDLE_ERROR(cudaFree(params_d_));
if (debugging_)
{
HANDLE_ERROR(cudaFree(debug_data_d_));
}
}
// inline void MPPICosts::setTrackPoints(std::vector<nav_msgs::Odometry> trackPoints){
// for(int i = 0; i < trackPoints.size();i++){
// track_points_x_.push_back(trackPoints[i].pose.pose.position.x);
// track_points_y_.push_back(trackPoints[i].pose.pose.position.y);
// }
// track_point_size_ = trackPoints.size();
// }
inline void MPPICosts::updateCostmap(std::vector<int> description, std::vector<float> data) {}
inline void MPPICosts::updateObstacles(std::vector<int> description, std::vector<float> data) {}
inline __host__ __device__ void MPPICosts::getCrash(float *state, int *crash)
{
if (fabs(state[3]) > 1.57)
{
crash[0] = 1;
}
}
inline __host__ __device__ float MPPICosts::getControlCost(float *u, float *du, float *vars)
{
float control_cost = 0;
control_cost += params_d_->steering_coeff * du[0] * (u[0] - du[0]) / (vars[0] * vars[0]);
control_cost += params_d_->throttle_coeff * du[1] * (u[1] - du[1]) / (vars[1] * vars[1]);
return control_cost;
}
inline __host__ __device__ float MPPICosts::getSpeedCost(float *s, int *crash)
{
float cost = 0;
float error = s[4] - params_d_->desired_speed;
if (l1_cost_)
{
cost = fabs(error);
}
else
{
cost = error * error;
}
return (params_d_->speed_coeff * cost);
}
inline __host__ __device__ float MPPICosts::getCrashCost(float *s, int *crash, int timestep)
{
float crash_cost = 0;
if (crash[0] > 0)
{
crash_cost = params_d_->crash_coeff;
}
return crash_cost;
}
inline __host__ __device__ float MPPICosts::getStabilizingCost(float *s)
{
float stabilizing_cost = 0;
if (fabs(s[4]) > 0.001)
{
float slip = -atan(s[5] / fabs(s[4]));
stabilizing_cost = params_d_->slip_penalty * powf(slip, 2);
if (fabs(-atan(s[5] / fabs(s[4]))) > params_d_->max_slip_ang)
{
//If the slip angle is above the max slip angle kill the trajectory.
stabilizing_cost += params_d_->crash_coeff;
}
}
return stabilizing_cost;
}
inline __host__ __device__ void MPPICosts::coorTransform(float x, float y, float *u, float *v, float *w)
{
//Compute a projective transform of (x, y, 0, 1)
u[0] = params_d_->r_c1.x * x + params_d_->r_c2.x * y + params_d_->trs.x;
v[0] = params_d_->r_c1.y * x + params_d_->r_c2.y * y + params_d_->trs.y;
w[0] = params_d_->r_c1.z * x + params_d_->r_c2.z * y + params_d_->trs.z;
}
inline __device__ float MPPICosts::getTrackCost(float *s, int *crash, float* traj_path, int num_traj_points)
// inline __device__ float MPPICosts::getTrackCost(float* s, int* crash)
{
float track_cost = 0;
    // s is the state of one sampled trajectory at a given timestep
    // Find the reference-path point closest to s, using Manhattan distance for speed
float minDistance = 100000;
int minIndex = -1;
// printf("traj_path[%d].x=%lf, traj_path[%d].y=%lf\n", 1648, traj_path[1648*3], 1648,traj_path[1648*3 + 1]);
if (num_traj_points==0)
{
printf("trrack empty!!!\n");
return 9999999;
}
for(int i = 0; i < num_traj_points; i++){
float dis = fabs(traj_path[i*3] - s[0]) + fabs(traj_path[i*3+1] - s[1]);
if(minDistance > dis){
minDistance = dis;
minIndex = i;
}
}
// printf("before return\n");
return 0;
// // printf("before print\n");
// // printf("%lf, %lf \n", s[0], s[1]);
// // nav_msgs::Path traj_path = track_points_;
// // printf("before track\n");
// // printf("path[0].x=%lf, path[0].y=%lf\n", track_points_.poses[0].pose.position.x, track_points_.poses[0].pose.position.y);
// // printf("track_size=%d\n", track_points_.poses.size());
// for (int i = 0; i < track_points_.poses.size(); i++)
// {
// float dis = fabs(track_points_.poses[i].pose.position.x - s[0]) + fabs(track_points_.poses[i].pose.position.y - s[1]);
// // float dis = fabs(track_points_x_[i] - s[0]) + fabs(track_points_y_[i] - s[1]);
// if (minDistance > dis)
// {
// minDistance = dis;
// minIndex = i;
// }
// }
// printf("mark1\n");
// for(int i = 0; i < track_points_.size(); i++){
// float dis = fabs(track_points_[i].pose.pose.position.x - s[0]) + fabs(track_points_[i].pose.pose.position.y - s[1]);
// // float dis = fabs(track_points_x_[i] - s[0]) + fabs(track_points_y_[i] - s[1]);
// if(minDistance > dis){
// minDistance = dis;
// minIndex = i;
// }
// }
    // Search around the Manhattan-nearest point for the Euclidean-nearest point
//todo
track_cost = minDistance;
if (fabs(track_cost) < params_d_->track_slop)
{
track_cost = 0;
}
else
{
track_cost = track_cost * params_d_->track_coeff;
}
printf("mark2\n");
if (track_cost >= params_d_->boundary_threshold)
{
crash[0] = 1;
}
printf("track_cpst= %lf\n", track_cost);
return track_cost;
/////////////////////////////////////
// float track_cost = 0;
// //Compute a transformation to get the (x,y) positions of the front and back of the car.
// float x_front = s[0] + FRONT_D*__cosf(s[2]);
// float y_front = s[1] + FRONT_D*__sinf(s[2]);
// float x_back = s[0] + BACK_D*__cosf(s[2]);
// float y_back = s[1] + BACK_D*__sinf(s[2]);
// float u,v,w; //Transformed coordinates
// //Cost of front of the car
// coorTransform(x_front, y_front, &u, &v, &w);
// float4 track_params_front = tex2D<float4>(costmap_tex_, u/w, v/w);
// //Cost for back of the car
// coorTransform(x_back, y_back, &u, &v, &w);
// float4 track_params_back = tex2D<float4>(costmap_tex_, u/w, v/w);
// float track_cost_front = track_params_front.x;
// float track_cost_back = track_params_back.x;
// track_cost = (fabs(track_cost_front) + fabs(track_cost_back) )/2.0;
// if (fabs(track_cost) < params_d_->track_slop) {
// track_cost = 0;
// }
// else {
// track_cost = params_d_->track_coeff*track_cost;
// }
// if (track_cost_front >= params_d_->boundary_threshold || track_cost_back >= params_d_->boundary_threshold) {
// crash[0] = 1;
// }
// return track_cost;
}
//Compute the immediate running cost.
inline __device__ float MPPICosts::computeCost(float *s, float *u, float *du,
float *vars, int *crash, int timestep, float* traj_path, int num_traj_points)
{
float control_cost = getControlCost(u, du, vars);
float track_cost = getTrackCost(s, crash, traj_path, num_traj_points);
float speed_cost = getSpeedCost(s, crash);
float crash_cost = (1.0 - params_.discount) * getCrashCost(s, crash, timestep);
float stabilizing_cost = getStabilizingCost(s);
float cost = control_cost + speed_cost + crash_cost + track_cost + stabilizing_cost;
if (cost > 1e12 || isnan(cost))
{
cost = 1e12;
}
return cost;
}
inline __device__ float MPPICosts::terminalCost(float *s)
{
return 0.0;
}
} // namespace drive_control
|
fbd17eabbf969df9bef7aab69317b9c1895056c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int* in, int d, int n){
int gid = threadIdx.x + blockIdx.x * blockDim.x;
if(gid >= n) return ;
int pre = (d==0) ? 1 : (2<<(d-1));
if(gid >= pre) {
in[gid] += in[gid-pre];
}
} | fbd17eabbf969df9bef7aab69317b9c1895056c9.cu | #include "includes.h"
__global__ void add(int* in, int d, int n){
int gid = threadIdx.x + blockIdx.x * blockDim.x;
if(gid >= n) return ;
int pre = (d==0) ? 1 : (2<<(d-1));
if(gid >= pre) {
in[gid] += in[gid-pre];
}
} |
903ddb4381a1284dfe5a65f72e30444125e12777.hip | // !!! This is a file automatically generated by hipify!!!
/* ==================================================================
Programmer: Yicheng Tu ([email protected])
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the rc machines
StevenFaulkner U9616-1844
Summer 2018
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of width */
atom * atom_list; /* list of all data points */
struct timezone Idunno;
struct timeval startTime, endTime;
void ErrorCheck( hipError_t err, const char op[])
{
if( err != hipSuccess )
{
printf("CUDA Error: %s, %s ", op, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*
distance of two points in the atom_list
*/
__device__ double
p2p_distance(atom *list, int ind1, int ind2)
{
double x1 = list[ind1].x_pos;
double x2 = list[ind2].x_pos;
double y1 = list[ind1].y_pos;
double y2 = list[ind2].y_pos;
double z1 = list[ind1].z_pos;
double z2 = list[ind2].z_pos;
return sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2) + (z1-z2)*(z1-z2));
}
/*
	brute-force SDH kernel: each GPU thread takes one atom and, with atomicAdd,
	accumulates its distances to all later atoms into the histogram
*/
__global__ void
PDH_baseline(bucket *histo_in, atom *list, double width, int size)
{
int i, j, pos;
double distance;
i = (blockIdx.x * blockDim.x) + threadIdx.x;
j = i+1;
for(int x = j; x < size; ++x)
{
distance = p2p_distance(list,i,x);
pos = (int) (distance/width);
atomicAdd( &histo_in[pos].d_cnt,1);
}
}
__global__ void
PDHGPU_Baseline(bucket *histogram,atom *list, double width)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if(x < y)
{
double dist = p2p_distance(list,x,y);
int pos = (int) (dist/width);
histogram[pos].d_cnt++;
printf("%d,%d : %d, %f \n", x,y,pos,dist);
}
__syncthreads();
}
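/*
	note: this 2-D variant is not launched from main(); unlike PDH_baseline it
	increments the histogram without atomics, so concurrent threads that hit the
	same bucket would race
*/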
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket *histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
size_t hist_size = sizeof(bucket)*num_buckets;
size_t atom_size = sizeof(atom)*PDH_acnt;
	histogram = (bucket *)calloc(num_buckets, sizeof(bucket)); /* zero-initialize so the kernel adds onto empty buckets */
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/*
PDH_baseline();
report_running_time();
output_histogram(histogram);
*/
bucket *dev_Histo = NULL;
atom *dev_atomL = NULL;
ErrorCheck(hipMalloc((void**) &dev_Histo,hist_size), "Allocate Memory for Histogram");
ErrorCheck(hipMalloc((void**) &dev_atomL, atom_size), "Allocate Memory for Atom List");
ErrorCheck(hipMemcpy(dev_Histo,histogram,hist_size, hipMemcpyHostToDevice), "Copying Histogram to Device");
ErrorCheck(hipMemcpy(dev_atomL, atom_list, atom_size, hipMemcpyHostToDevice), "Copying Atom list to Device");
	hipLaunchKernelGGL(( PDH_baseline) , dim3(ceil(PDH_acnt/32.0)), dim3(32) , 0, 0, dev_Histo, dev_atomL, PDH_res, PDH_acnt); /* round up so a partial final block is still launched */
ErrorCheck(hipMemcpy(histogram, dev_Histo, hist_size, hipMemcpyDeviceToHost), " Move Histogram to host");
/* print out the histogram */
output_histogram(histogram);
ErrorCheck(hipFree(dev_Histo), "Free Device Histogram");
ErrorCheck(hipFree(dev_atomL), "Free Device Atom List");
free(histogram);
free(atom_list);
ErrorCheck(hipDeviceReset(), "Reset");
return 0;
}
| 903ddb4381a1284dfe5a65f72e30444125e12777.cu | /* ==================================================================
Programmer: Yicheng Tu ([email protected])
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the rc machines
StevenFaulkner U9616-1844
Summer 2018
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of width */
atom * atom_list; /* list of all data points */
struct timezone Idunno;
struct timeval startTime, endTime;
void ErrorCheck( cudaError_t err, const char op[])
{
if( err != cudaSuccess )
{
printf("CUDA Error: %s, %s ", op, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*
distance of two points in the atom_list
*/
__device__ double
p2p_distance(atom *list, int ind1, int ind2)
{
double x1 = list[ind1].x_pos;
double x2 = list[ind2].x_pos;
double y1 = list[ind1].y_pos;
double y2 = list[ind2].y_pos;
double z1 = list[ind1].z_pos;
double z2 = list[ind2].z_pos;
return sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2) + (z1-z2)*(z1-z2));
}
/*
	brute-force SDH kernel: each GPU thread takes one atom and, with atomicAdd,
	accumulates its distances to all later atoms into the histogram
*/
__global__ void
PDH_baseline(bucket *histo_in, atom *list, double width, int size)
{
int i, j, pos;
double distance;
i = (blockIdx.x * blockDim.x) + threadIdx.x;
j = i+1;
for(int x = j; x < size; ++x)
{
distance = p2p_distance(list,i,x);
pos = (int) (distance/width);
atomicAdd( &histo_in[pos].d_cnt,1);
}
}
__global__ void
PDHGPU_Baseline(bucket *histogram,atom *list, double width)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if(x < y)
{
double dist = p2p_distance(list,x,y);
int pos = (int) (dist/width);
histogram[pos].d_cnt++;
printf("%d,%d : %d, %f \n", x,y,pos,dist);
}
__syncthreads();
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket *histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
size_t hist_size = sizeof(bucket)*num_buckets;
size_t atom_size = sizeof(atom)*PDH_acnt;
	histogram = (bucket *)calloc(num_buckets, sizeof(bucket)); /* zero-initialize so the kernel adds onto empty buckets */
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/*
PDH_baseline();
report_running_time();
output_histogram(histogram);
*/
bucket *dev_Histo = NULL;
atom *dev_atomL = NULL;
ErrorCheck(cudaMalloc((void**) &dev_Histo,hist_size), "Allocate Memory for Histogram");
ErrorCheck(cudaMalloc((void**) &dev_atomL, atom_size), "Allocate Memory for Atom List");
ErrorCheck(cudaMemcpy(dev_Histo,histogram,hist_size, cudaMemcpyHostToDevice), "Copying Histogram to Device");
ErrorCheck(cudaMemcpy(dev_atomL, atom_list, atom_size, cudaMemcpyHostToDevice), "Copying Atom list to Device");
	PDH_baseline <<<ceil(PDH_acnt/32.0), 32 >>> (dev_Histo, dev_atomL, PDH_res, PDH_acnt); /* round up so a partial final block is still launched */
ErrorCheck(cudaMemcpy(histogram, dev_Histo, hist_size, cudaMemcpyDeviceToHost), " Move Histogram to host");
/* print out the histogram */
output_histogram(histogram);
ErrorCheck(cudaFree(dev_Histo), "Free Device Histogram");
ErrorCheck(cudaFree(dev_atomL), "Free Device Atom List");
free(histogram);
free(atom_list);
ErrorCheck(cudaDeviceReset(), "Reset");
return 0;
}
|
75a5c9b3ca154f34dd2ad597d8ababc8b7b7cdfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
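// With the single-block, 16-thread launch in main below, blockDim is (16,1,1) and
// threadIdx.y is always 0: thread 0 zeroes the shared Sum1 array, all 16 threads then
// atomically add their private (i+1)*(j+1) values into every cell, and thread 0 prints
// the totals, i.e. 16*(i+1)*(j+1).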
__global__ void addKernel()
{
__shared__ int Sum1[10][10];
int Sum[10][10];
if( threadIdx.x ==0 && threadIdx.y ==0){
for(int i=0; i<10; i++){
for(int j=0; j<10; j++){
Sum1[i][j] = 0;
}
}
}
__syncthreads();
for(int i=0; i<10; i++){
for(int j=0; j<10; j++){
Sum[i][j] = (i+1)*(j+1);
atomicAdd(&Sum1[i][j], Sum[i][j]);
}
}
__syncthreads();
if( threadIdx.x ==0 && threadIdx.y ==0){
for(int i=0; i<10; i++){
for(int j=0; j<10; j++){
printf("%d, ", Sum1[i][j]);
}
}
}
}
int main()
{
hipLaunchKernelGGL(( addKernel), dim3(1),dim3(16), 0, 0, );
    hipDeviceSynchronize(); // wait for the kernel so its printf output is flushed before exit
    return 0;
}
| 75a5c9b3ca154f34dd2ad597d8ababc8b7b7cdfe.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void addKernel()
{
__shared__ int Sum1[10][10];
int Sum[10][10];
if( threadIdx.x ==0 && threadIdx.y ==0){
for(int i=0; i<10; i++){
for(int j=0; j<10; j++){
Sum1[i][j] = 0;
}
}
}
__syncthreads();
for(int i=0; i<10; i++){
for(int j=0; j<10; j++){
Sum[i][j] = (i+1)*(j+1);
atomicAdd(&Sum1[i][j], Sum[i][j]);
}
}
__syncthreads();
if( threadIdx.x ==0 && threadIdx.y ==0){
for(int i=0; i<10; i++){
for(int j=0; j<10; j++){
printf("%d, ", Sum1[i][j]);
}
}
}
}
int main()
{
addKernel<<<1,16>>>();
    cudaDeviceSynchronize(); // wait for the kernel so its printf output is flushed before exit
    return 0;
}
|
33d08e13ebc66d8be1bbffe625d855262029c910.hip | // !!! This is a file automatically generated by hipify!!!
// Variable num_cores denotes the number of threads to run the code on.
#include <stdio.h>
//#include <omp.h>
#include <string.h>
#include <math.h>
//#include "../common/common.h"
#include <hip/hip_runtime.h>
/*
 * compute string value, length should be smaller than strlen
*/
int compute_value(char *str, int length, int d, int q)
{
int i = 0;
int p0 = 0;
for (i = 0; i < length; ++i) {
p0 = (d * p0 + (str[i] /*- '0'*/ )) % q;
}
return p0;
}
int rk_matcher(char *str, char *pattern, int d, int q)
{
int i = 0, j = 0;
int str_length = strlen(str);
int pattern_length = strlen(pattern);
int p0 = 0;
int ts[str_length];
/* This code block prints what is inside the matrix
for (i=0;i<num_cores;i++)
{
for (j=0;j<el_chunk_len;j++)
if (tss[i][j]==0)
printf("%c", '0');
else
printf("%c", tss[i][j]);
printf("\n");
}
*/
//hash value of the pattern
p0 = compute_value(pattern, pattern_length, d, q);
//hash value of the first char
ts[0] = compute_value(str, pattern_length, d, q);
//p does not change, calculate once
int p = pow(d, pattern_length - 1);
for (i = 1; i < str_length - pattern_length + 1; i++) {
ts[i] = ((str[i + pattern_length - 1]) * p
+ (ts[i - 1] - (str[i - 1])) / d) % q;
/* (ts[i - 1] * d -
((str[i - 1] - '0') * (int) pow(d,
pattern_length))) % q +
(str[i + pattern_length - 1]
- '0') % q; */
}
/* for (i=0;i<str_length-pattern_length+1;i++)
{
printf("%d ", ts[i]);
}*/
for (i = 0; i <= str_length - pattern_length + 1; ++i) {
if (ts[i] == p0) {
for (j = 0; j < pattern_length; ++j) {
if (pattern[j] != str[i + j]) {
break;
} else if (j == pattern_length - 1) {
printf("%d\n", i);
}
}
}
}
return 0;
}
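/*
 * Each thread of findHashes hashes one padded chunk of d_len characters. The hash
 * convention is least-significant-first: hash(i) = sum_k chunk[i+k] * d^k, so sliding
 * the window drops the lowest-order term ((h - chunk[i-1]) / d) and appends the new
 * character at the highest order (* p = d^(pattern_length-1)). With the modulus q
 * commented out the hashes are exact integers, which keeps that integer division valid.
 */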
__global__ void findHashes(char *d_css, int d_len, int *d_iss,
int pattern_length, int d, /*int q,*/ int p)
{
int i = 0;
int ind = d_len * threadIdx.x;
d_iss += ind;
d_css += ind;
d_iss[0] = 0;
// printf("%d %d %d %d %d %d", d_iss[0], d_len, pattern_length, d, q, p);
int pw = 1;
for (; i < pattern_length; i++) {
d_iss[0] += pw * (d_css[i]);
pw *= d;
}
//d_iss[0] %= q;
//printf("%d ", d_iss[0]);
for (i = 1; i < d_len - pattern_length + 1; i++) {
d_iss[i] = ((d_css[i + pattern_length - 1]) * p
+ (d_iss[i - 1] - (d_css[i - 1])) / d); //% q;
//printf("%d ",d_iss[i]);
}
}
__global__ void seekPattern(char *d_css, int d_len, int *d_iss,
int pattern_length, char* pattern, int d, int p0)
{
int i = 0;
int j=0;
int ind = d_len * threadIdx.x;
d_iss += ind;
d_css += ind;
for (i = 0; i < d_len - pattern_length + 1; i++) {
if (d_iss[i] == p0) {
for (j = 0; j < pattern_length; j++) {
if (pattern[j] != d_css[i + j]) {
break;
} else if (j == pattern_length - 1) {
// printf("ThreadId: %d\n", threadIdx.x);
printf("pos:%d\n", threadIdx.x*(d_len-pattern_length+1)+i-pattern_length+1);
}
}
}
}
}
int main(int argc, char *argv[])
{
int i = 0;
int j = 0;
char str[] = "bababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaver";
char pattern[] = "aba";
int d = 3;
//int q = 50000;
int num_cores = 8;
//CHECK(hipDeviceReset());
int str_length = strlen(str);
//int nElem=str_length;
int pattern_length = strlen(pattern);
int chunk_len = (int)ceil((float)str_length / num_cores);
int padding_len = chunk_len * num_cores - str_length;
int el_chunk_len = chunk_len + pattern_length - 1;
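	//each row of css holds one chunk of the text prefixed by the last pattern_length-1
	//characters of the previous chunk (zeros for the first row), so matches that straddle
	//a chunk boundary are still found; padding_len trailing zeros complete the last row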
//matrix on host which holds the characters, each row will go to a core
char css[num_cores][el_chunk_len];
int iss[num_cores][el_chunk_len];
//on the device
char *d_css;
char *d_pattern;
//hashes on the device
int *d_iss;
int nchars = num_cores * el_chunk_len;
hipMalloc((char **)&d_css, nchars * sizeof(char));
hipMalloc((int **)&d_iss, nchars * sizeof(int));
hipMalloc((char **)&d_pattern, pattern_length*sizeof(char));
//initial zeroes
for (i = 0; i < pattern_length - 1; i++)
css[0][i] = 0;
//first n-1 cores' characters
for (i = 0; i < num_cores - 1; i++)
for (j = 0; j < chunk_len; j++)
css[i][j + pattern_length - 1] = str[i * chunk_len + j];
//last core's characters
for (i = (num_cores - 1) * chunk_len, j = 0; i < str_length; i++, j++)
css[num_cores - 1][j + pattern_length - 1] = str[i];
//last n-1 cores' padding characters
for (i = 1; i < num_cores; i++)
for (j = 0; j < pattern_length - 1; j++)
css[i][j] = css[i - 1][j + chunk_len];
//last core's last paddings
for (i = 0; i < padding_len; i++)
css[num_cores - 1][el_chunk_len - i - 1] = 0;
//transfer css to device
	hipMemcpy(d_css, css, nchars, hipMemcpyHostToDevice);
hipMemcpy(d_pattern, pattern, pattern_length, hipMemcpyHostToDevice);
dim3 block(num_cores); //str_length/pattern_length
//__global__ void findHashes(char *d_css, int d_len, int *d_iss, int pattern_length, int d, int q, int p)
int p = pow(d, pattern_length - 1);
hipLaunchKernelGGL(( findHashes) , dim3(1), dim3(num_cores) , 0, 0, d_css, el_chunk_len, d_iss,
pattern_length, d, /*q,*/ p);
//find the hash of the pattern
int pw = 1;
int p0=0;
for (i=0; i < pattern_length; i++) {
p0 += pw * (pattern[i]);
pw *= d;
}
//printf("%d\n", p0);
hipLaunchKernelGGL(( seekPattern), dim3(1), dim3(num_cores), 0, 0, d_css, el_chunk_len, d_iss,
pattern_length, d_pattern, d, p0);
//printf("%d %d %d %d %d \n", el_chunk_len, pattern_length, d, q, p);
//hipMemcpy(iss, d_iss, nchars * sizeof(int), hipMemcpyDeviceToHost);
/*for (i=0;i<num_cores;i++)
{
for (j=0;j<el_chunk_len;j++)
printf("%d ", iss[i][j]);
printf("\n");
}
*/
hipFree(d_iss);
hipFree(d_css);
//int pos = rk_matcher(str, pattern, d, q);
//printf("%d", pos);
return 0;
} | 33d08e13ebc66d8be1bbffe625d855262029c910.cu | // Variable num_cores denotes the number of threads to run the code on.
#include <stdio.h>
//#include <omp.h>
#include <string.h>
#include <math.h>
//#include "../common/common.h"
#include <cuda_runtime.h>
/*
 * compute string value, length should be smaller than strlen
*/
int compute_value(char *str, int length, int d, int q)
{
int i = 0;
int p0 = 0;
for (i = 0; i < length; ++i) {
p0 = (d * p0 + (str[i] /*- '0'*/ )) % q;
}
return p0;
}
int rk_matcher(char *str, char *pattern, int d, int q)
{
int i = 0, j = 0;
int str_length = strlen(str);
int pattern_length = strlen(pattern);
int p0 = 0;
int ts[str_length];
/* This code block prints what is inside the matrix
for (i=0;i<num_cores;i++)
{
for (j=0;j<el_chunk_len;j++)
if (tss[i][j]==0)
printf("%c", '0');
else
printf("%c", tss[i][j]);
printf("\n");
}
*/
//hash value of the pattern
p0 = compute_value(pattern, pattern_length, d, q);
//hash value of the first char
ts[0] = compute_value(str, pattern_length, d, q);
//p does not change, calculate once
int p = pow(d, pattern_length - 1);
for (i = 1; i < str_length - pattern_length + 1; i++) {
ts[i] = ((str[i + pattern_length - 1]) * p
+ (ts[i - 1] - (str[i - 1])) / d) % q;
/* (ts[i - 1] * d -
((str[i - 1] - '0') * (int) pow(d,
pattern_length))) % q +
(str[i + pattern_length - 1]
- '0') % q; */
}
/* for (i=0;i<str_length-pattern_length+1;i++)
{
printf("%d ", ts[i]);
}*/
for (i = 0; i <= str_length - pattern_length + 1; ++i) {
if (ts[i] == p0) {
for (j = 0; j < pattern_length; ++j) {
if (pattern[j] != str[i + j]) {
break;
} else if (j == pattern_length - 1) {
printf("%d\n", i);
}
}
}
}
return 0;
}
__global__ void findHashes(char *d_css, int d_len, int *d_iss,
int pattern_length, int d, /*int q,*/ int p)
{
int i = 0;
int ind = d_len * threadIdx.x;
d_iss += ind;
d_css += ind;
d_iss[0] = 0;
// printf("%d %d %d %d %d %d", d_iss[0], d_len, pattern_length, d, q, p);
int pw = 1;
for (; i < pattern_length; i++) {
d_iss[0] += pw * (d_css[i]);
pw *= d;
}
//d_iss[0] %= q;
//printf("%d ", d_iss[0]);
for (i = 1; i < d_len - pattern_length + 1; i++) {
d_iss[i] = ((d_css[i + pattern_length - 1]) * p
+ (d_iss[i - 1] - (d_css[i - 1])) / d); //% q;
//printf("%d ",d_iss[i]);
}
}
__global__ void seekPattern(char *d_css, int d_len, int *d_iss,
int pattern_length, char* pattern, int d, int p0)
{
int i = 0;
int j=0;
int ind = d_len * threadIdx.x;
d_iss += ind;
d_css += ind;
for (i = 0; i < d_len - pattern_length + 1; i++) {
if (d_iss[i] == p0) {
for (j = 0; j < pattern_length; j++) {
if (pattern[j] != d_css[i + j]) {
break;
} else if (j == pattern_length - 1) {
// printf("ThreadId: %d\n", threadIdx.x);
printf("pos:%d\n", threadIdx.x*(d_len-pattern_length+1)+i-pattern_length+1);
}
}
}
}
}
int main(int argc, char *argv[])
{
int i = 0;
int j = 0;
char str[] = "bababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaver";
char pattern[] = "aba";
int d = 3;
//int q = 50000;
int num_cores = 8;
//CHECK(cudaDeviceReset());
int str_length = strlen(str);
//int nElem=str_length;
int pattern_length = strlen(pattern);
int chunk_len = (int)ceil((float)str_length / num_cores);
int padding_len = chunk_len * num_cores - str_length;
int el_chunk_len = chunk_len + pattern_length - 1;
//matrix on host which holds the characters, each row will go to a core
char css[num_cores][el_chunk_len];
int iss[num_cores][el_chunk_len];
//on the device
char *d_css;
char *d_pattern;
//hashes on the device
int *d_iss;
int nchars = num_cores * el_chunk_len;
cudaMalloc((char **)&d_css, nchars * sizeof(char));
cudaMalloc((int **)&d_iss, nchars * sizeof(int));
cudaMalloc((char **)&d_pattern, pattern_length*sizeof(char));
//initial zeroes
for (i = 0; i < pattern_length - 1; i++)
css[0][i] = 0;
//first n-1 cores' characters
for (i = 0; i < num_cores - 1; i++)
for (j = 0; j < chunk_len; j++)
css[i][j + pattern_length - 1] = str[i * chunk_len + j];
//last core's characters
for (i = (num_cores - 1) * chunk_len, j = 0; i < str_length; i++, j++)
css[num_cores - 1][j + pattern_length - 1] = str[i];
//last n-1 cores' padding characters
for (i = 1; i < num_cores; i++)
for (j = 0; j < pattern_length - 1; j++)
css[i][j] = css[i - 1][j + chunk_len];
//last core's last paddings
for (i = 0; i < padding_len; i++)
css[num_cores - 1][el_chunk_len - i - 1] = 0;
//transfer css to device
	cudaMemcpy(d_css, css, nchars, cudaMemcpyHostToDevice);
cudaMemcpy(d_pattern, pattern, pattern_length, cudaMemcpyHostToDevice);
dim3 block(num_cores); //str_length/pattern_length
//__global__ void findHashes(char *d_css, int d_len, int *d_iss, int pattern_length, int d, int q, int p)
int p = pow(d, pattern_length - 1);
findHashes <<< 1, num_cores >>> (d_css, el_chunk_len, d_iss,
pattern_length, d, /*q,*/ p);
//find the hash of the pattern
int pw = 1;
int p0=0;
for (i=0; i < pattern_length; i++) {
p0 += pw * (pattern[i]);
pw *= d;
}
//printf("%d\n", p0);
seekPattern<<<1, num_cores>>>(d_css, el_chunk_len, d_iss,
pattern_length, d_pattern, d, p0);
//printf("%d %d %d %d %d \n", el_chunk_len, pattern_length, d, q, p);
//cudaMemcpy(iss, d_iss, nchars * sizeof(int), cudaMemcpyDeviceToHost);
/*for (i=0;i<num_cores;i++)
{
for (j=0;j<el_chunk_len;j++)
printf("%d ", iss[i][j]);
printf("\n");
}
*/
cudaFree(d_iss);
cudaFree(d_css);
//int pos = rk_matcher(str, pattern, d, q);
//printf("%d", pos);
return 0;
} |
f8b0c70eb97d9a7e7d820a203310bfdb857de1a2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size. */
# define NI 2048
# define NJ 2048
# define NK 2048
# define NL 2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NI + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NL; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=0; i < NL; i++)
{
for (j=0; j < NI; j++)
{
if (percentDiff(E[i*NI + j], E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for (k = 0; k < NK; k++)
{
C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for (k = 0; k < NJ; k++)
{
E[i * NL + j] += C[i * NJ + k] * D[k * NL + j];
}
}
}
void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
{
int i, j, k;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] = 0.0;
for (k = 0; k < NK; ++k)
{
C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j];
}
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
E[i*NL + j] = 0.0;
for (k = 0; k < NJ; ++k)
{
E[i*NL + j] += C[i*NJ + k] * D[k*NL + j];
}
}
}
}
void mm2Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* E_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
DATA_TYPE *D_gpu;
DATA_TYPE *E_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ);
hipMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NJ * NL);
hipMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NL);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice);
hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice);
	hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
hipMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NJ * NL, hipMemcpyHostToDevice);
hipMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NL, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
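	/* mm2_kernel1 computes C = A * B and mm2_kernel2 then computes E = C * D. Launches on
	   the default stream are already ordered, so the data dependency is respected; the
	   synchronize calls ensure both kernels have finished before t_end is recorded. */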
t_start = rtclock();
hipLaunchKernelGGL(( mm2_kernel1), dim3(grid1),dim3(block), 0, 0, A_gpu, B_gpu, C_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( mm2_kernel2), dim3(grid2),dim3(block), 0, 0, C_gpu, D_gpu, E_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(E_outputFromGpu, E_gpu, sizeof(DATA_TYPE) * NI * NL, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(C_gpu);
hipFree(D_gpu);
hipFree(E_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* C;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* E_outputFromGpu;
C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE));
E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
E_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
init_array(A, B, C, D);
GPU_argv_init();
mm2Cuda(A, B, C, D, E, E_outputFromGpu);
t_start = rtclock();
mm2_cpu(A, B, C, D, E);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(E, E_outputFromGpu);
free(C);
free(A);
free(B);
free(D);
free(E);
free(E_outputFromGpu);
return 0;
}
| f8b0c70eb97d9a7e7d820a203310bfdb857de1a2.cu | /**
* 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size. */
# define NI 2048
# define NJ 2048
# define NK 2048
# define NL 2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NI + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NL; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=0; i < NL; i++)
{
for (j=0; j < NI; j++)
{
if (percentDiff(E[i*NI + j], E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for (k = 0; k < NK; k++)
{
C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for (k = 0; k < NJ; k++)
{
E[i * NL + j] += C[i * NJ + k] * D[k * NL + j];
}
}
}
void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
{
int i, j, k;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] = 0.0;
for (k = 0; k < NK; ++k)
{
C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j];
}
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
E[i*NL + j] = 0.0;
for (k = 0; k < NJ; ++k)
{
E[i*NL + j] += C[i*NJ + k] * D[k*NL + j];
}
}
}
}
void mm2Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* E_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
DATA_TYPE *D_gpu;
DATA_TYPE *E_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ);
cudaMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NJ * NL);
cudaMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NL);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice);
	cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
cudaMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NJ * NL, cudaMemcpyHostToDevice);
cudaMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
t_start = rtclock();
mm2_kernel1<<<grid1,block>>>(A_gpu, B_gpu, C_gpu);
cudaThreadSynchronize();
mm2_kernel2<<<grid2,block>>>(C_gpu, D_gpu, E_gpu);
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(E_outputFromGpu, E_gpu, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
cudaFree(D_gpu);
cudaFree(E_gpu);
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* C;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* E_outputFromGpu;
C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE));
E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
E_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
init_array(A, B, C, D);
GPU_argv_init();
mm2Cuda(A, B, C, D, E, E_outputFromGpu);
t_start = rtclock();
mm2_cpu(A, B, C, D, E);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(E, E_outputFromGpu);
free(C);
free(A);
free(B);
free(D);
free(E);
free(E_outputFromGpu);
return 0;
}
|
a7d8fc13266bf4a7cca062f5d4b0a6d9b0b329dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/segment_pooling.h"
namespace phi {
namespace funcs {
using Tensor = DenseTensor;
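// The kernels below process the input in stripes: each thread handles DimTileSize
// consecutive rows (for the pooling kernels, within a single inner-dimension column),
// accumulates locally, and writes interior segments directly; only a stripe's first and
// last segments, which may be shared with neighbouring stripes, are combined with atomics.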
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentSumIdsKernel(const Index* segment_ids,
T* summed_ids,
const Index input_length_size,
const Index total_stripe_count) {
CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
const Index segment_offset = stripe_index;
const Index dim_index_base = stripe_index * Index(DimTileSize);
const Index actual_height =
min(Index(DimTileSize), input_length_size - dim_index_base);
Index first_segment_id = segment_ids[dim_index_base];
Index last_segment_id = -1;
if (dim_index_base > 0) {
last_segment_id = segment_ids[dim_index_base - 1];
}
T sum = T(0);
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
PADDLE_ENFORCE(current_segment_id >= last_segment_id,
"the segment ids should be sorted, but got "
"segment_ids[%d]:%d > segment_ids[%d]:%d.",
dim_index_base + j - 1,
dim_index_base + j,
last_segment_id,
current_segment_id);
if (current_segment_id > last_segment_id) {
for (Index interval_id = last_segment_id + 1;
interval_id < current_segment_id;
++interval_id) {
*(summed_ids + interval_id) = 0;
}
if (j > 0) {
if (last_segment_id == first_segment_id) {
phi::CudaAtomicAdd(summed_ids + last_segment_id, sum);
} else {
*(summed_ids + last_segment_id) = sum;
}
sum = T(0);
}
}
sum += T(1);
last_segment_id = current_segment_id;
}
phi::CudaAtomicAdd(summed_ids + last_segment_id, sum);
}
}
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentMeanKernel(const Index* segment_ids,
const T* input,
T* output,
T* summed_ids,
const Index input_length_size,
const Index inner_dim_size,
const Index output_length_size,
const Index total_stripe_count) {
CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
const Index segment_offset = stripe_index % inner_dim_size;
const Index dim_index_base =
stripe_index / inner_dim_size * Index(DimTileSize);
const Index actual_height =
min(Index(DimTileSize), input_length_size - dim_index_base);
Index first_segment_id = segment_ids[dim_index_base];
Index last_segment_id = -1;
if (dim_index_base > 0) {
last_segment_id = segment_ids[dim_index_base - 1];
}
T sum = T(0);
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
if (current_segment_id > last_segment_id) {
        // reset the interval values which do not have corresponding ids.
for (Index interval_id = last_segment_id + 1;
interval_id < current_segment_id;
++interval_id) {
*(output + interval_id * inner_dim_size + segment_offset) = T(0);
}
if (j > 0) {
Index output_index =
last_segment_id * inner_dim_size + segment_offset;
if (last_segment_id == first_segment_id) {
phi::CudaAtomicAdd(output + output_index,
sum / *(summed_ids + last_segment_id));
} else {
*(output + output_index) = sum / *(summed_ids + last_segment_id);
}
sum = T(0);
}
}
sum += input[(dim_index_base + j) * inner_dim_size + segment_offset];
last_segment_id = current_segment_id;
}
Index output_index = last_segment_id * inner_dim_size + segment_offset;
phi::CudaAtomicAdd(output + output_index,
sum / *(summed_ids + last_segment_id));
}
}
template <typename T, typename Index, typename Helper, typename Pool>
__global__ void __launch_bounds__(1024, 1) SegmentOpsKernel(
const Index* segment_ids, const T* input, T* output, Helper h, Pool pool) {
CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
Index segment_offset, dim_index_base, actual_height;
Index inner_dim_size = h.inner_dim_size;
h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
T minmax = pool.initial();
Index first_segment_id = segment_ids[dim_index_base];
// -1 is for the start value when interval_id = 0
Index last_segment_id = -1;
if (dim_index_base > 0) {
last_segment_id = segment_ids[dim_index_base - 1];
}
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
// ensure the segment_ids is sorted.
PADDLE_ENFORCE(current_segment_id >= last_segment_id,
"The segment ids should be sorted, but got "
"segment_ids[%d]:%d > segment_ids[%d]:%d.",
dim_index_base + j - 1,
dim_index_base + j,
last_segment_id,
current_segment_id);
if (current_segment_id > last_segment_id) {
        // reset the interval values which do not have corresponding ids.
for (Index interval_id = last_segment_id + 1;
interval_id < current_segment_id;
++interval_id) {
*(output + interval_id * inner_dim_size + segment_offset) = T(0);
}
// don't update result when j=0
if (j > 0) {
const Index output_index =
last_segment_id * inner_dim_size + segment_offset;
if (last_segment_id == first_segment_id) {
pool.atomic(output + output_index, minmax);
} else {
*(output + output_index) = minmax;
}
minmax = pool.initial();
}
}
pool.compute(
input[(dim_index_base + j) * inner_dim_size + segment_offset],
&minmax);
last_segment_id = current_segment_id;
}
const Index output_index =
last_segment_id * inner_dim_size + segment_offset;
pool.atomic(output + output_index, minmax);
}
}
template <typename T, typename Index, typename Helper>
__global__ void SegmentIndexGradKernel(const Index* segment_ids,
const T* input,
const T* output,
const T* out_grad,
T* in_grad,
Helper h) {
CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
Index segment_offset, dim_index_base, actual_height;
h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
Index input_index =
(dim_index_base + j) * h.inner_dim_size + segment_offset;
Index output_index =
current_segment_id * h.inner_dim_size + segment_offset;
if (input[input_index] == output[output_index]) {
in_grad[input_index] = out_grad[output_index];
}
}
}
}
template <class T>
class MaxPool {
public:
DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
DEVICE inline void compute(const T& x, T* y) { *y = *y > x ? *y : x; }
DEVICE inline T atomic(T* address, const T val) {
return phi::CudaAtomicMax(address, val);
}
};
template <class T>
class MinPool {
public:
DEVICE inline T initial() { return static_cast<T>(FLT_MAX); }
DEVICE inline void compute(const T& x, T* y) { *y = *y < x ? *y : x; }
DEVICE inline T atomic(T* address, const T val) {
return phi::CudaAtomicMin(address, val);
}
};
template <class T>
class SumPool {
public:
DEVICE inline T initial() { return static_cast<T>(0); }
DEVICE inline void compute(const T& x, T* y) { *y = *y + x; }
DEVICE inline T atomic(T* address, const T val) {
return phi::CudaAtomicAdd(address, val);
}
};
template <class T>
class ArrangeHelper {
public:
const T input_total_size;
const T input_length_size;
const T output_length_size;
T inner_dim_size;
T total_stripe_count;
const T DimTileSize = 8;
ArrangeHelper(T a, T b, T c)
: input_total_size(a), input_length_size(b), output_length_size(c) {
T input_outer_dim_num_stripe =
(input_length_size + DimTileSize - 1) / DimTileSize;
inner_dim_size = input_total_size / input_length_size;
total_stripe_count = inner_dim_size * input_outer_dim_num_stripe;
}
DEVICE inline void calculate(T stripe_index,
T* segment_offset,
T* dim_index_base,
T* actual_height) {
*segment_offset = stripe_index % inner_dim_size;
*dim_index_base = stripe_index / inner_dim_size * DimTileSize;
*actual_height = min(DimTileSize, input_length_size - *dim_index_base);
}
};
template <typename T, typename Index>
void SegmentPoolCUDAGradFunctor(const phi::GPUContext& ctx,
const DenseTensor& input,
const DenseTensor& segment_ids,
const DenseTensor& output,
const DenseTensor& out_grad,
DenseTensor* in_grad,
const std::string pooltype = "SUM") {
auto h = ArrangeHelper<Index>(
input.numel(), segment_ids.dims()[0], output.dims()[0]);
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(ctx, h.total_stripe_count);
if (pooltype == "MAX" || pooltype == "MIN") {
hipLaunchKernelGGL(( SegmentIndexGradKernel<T, Index, ArrangeHelper<Index>>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
ctx.stream(), segment_ids.data<Index>(),
input.data<T>(),
output.data<T>(),
out_grad.data<T>(),
in_grad->data<T>(),
h);
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Unsupported segment pooling grad operation, Only MAX, MIN "
"available, but got %s.",
pooltype));
}
}
template <typename T>
__global__ void SimpleDiv(T* x, const T* y, const int len, const int dim) {
for (int i = blockIdx.x; i < len; i += gridDim.x) {
__shared__ T y_i;
auto base = i * dim;
if (threadIdx.x == 0) {
y_i = y[i];
}
__syncthreads();
for (int j = threadIdx.x; j < dim; j += blockDim.x) {
x[base + j] /= y_i;
}
}
}
template <typename T, typename IndexT>
class SegmentPoolFunctor<phi::GPUContext, T, IndexT> {
public:
void operator()(const phi::GPUContext& ctx,
const DenseTensor& input,
const DenseTensor& segment_ids,
DenseTensor* output,
DenseTensor* summed_ids = nullptr,
const std::string pooltype = "SUM") {
if (pooltype == "MEAN") {
// Sum the segment id num first
IndexT DimTileSize = 8;
auto input_length_size = segment_ids.numel();
auto total_stripe_count =
(input_length_size + DimTileSize - 1) / DimTileSize;
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(ctx, total_stripe_count);
hipLaunchKernelGGL(( SegmentSumIdsKernel<T, IndexT, IndexT(8)>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
ctx.stream(), segment_ids.data<IndexT>(),
summed_ids->data<T>(),
input_length_size,
total_stripe_count);
}
auto h = ArrangeHelper<IndexT>(
input.numel(), segment_ids.dims()[0], output->dims()[0]);
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(ctx, h.total_stripe_count);
if (pooltype == "MEAN") {
hipLaunchKernelGGL(( SegmentMeanKernel<T, IndexT, IndexT(8)>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
ctx.stream(), segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
summed_ids->data<T>(),
h.input_length_size,
h.inner_dim_size,
h.output_length_size,
h.total_stripe_count);
} else if (pooltype == "SUM") {
SumPool<T> pool;
hipLaunchKernelGGL(( SegmentOpsKernel<T, IndexT, ArrangeHelper<IndexT>, SumPool<T>>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
ctx.stream(), segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
h,
pool);
} else if (pooltype == "MAX") {
MaxPool<T> pool;
hipLaunchKernelGGL(( SegmentOpsKernel<T, IndexT, ArrangeHelper<IndexT>, MaxPool<T>>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
ctx.stream(), segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
h,
pool);
} else if (pooltype == "MIN") {
MinPool<T> pool;
hipLaunchKernelGGL(( SegmentOpsKernel<T, IndexT, ArrangeHelper<IndexT>, MinPool<T>>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
ctx.stream(), segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
h,
pool);
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
"available, but got %s.",
pooltype));
}
}
};
template <typename T, typename IndexT>
class SegmentPoolGradFunctor<phi::GPUContext, T, IndexT> {
public:
void operator()(const phi::GPUContext& dev_ctx,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& out_grad,
const DenseTensor& segments,
DenseTensor* in_grad,
const paddle::optional<DenseTensor>& summed_ids,
const std::string pooltype = "SUM") {
if (pooltype == "MAX" || pooltype == "MIN") {
SegmentPoolCUDAGradFunctor<T, IndexT>(
dev_ctx, input, segments, output, out_grad, in_grad, pooltype);
} else if (pooltype == "MEAN") {
DenseTensor mean_grad;
mean_grad.Resize(input.dims());
dev_ctx.template Alloc<T>(&mean_grad);
phi::Copy(dev_ctx, out_grad, dev_ctx.GetPlace(), false, &mean_grad);
int len = output.dims()[0];
int dim = output.numel() / len;
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, len);
hipLaunchKernelGGL(( SimpleDiv<T>), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(),
mean_grad.data<T>(), summed_ids->data<T>(), len, dim);
phi::funcs::GPUGather<T, IndexT>(dev_ctx, mean_grad, segments, in_grad);
} else if (pooltype == "SUM") {
phi::funcs::GPUGather<T, IndexT>(dev_ctx, out_grad, segments, in_grad);
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
"available, but got %s.",
pooltype));
}
}
};
using GPU = phi::GPUContext;
using float16 = phi::dtype::float16;
template class SegmentPoolFunctor<GPU, float, int>;
template class SegmentPoolFunctor<GPU, float, int64_t>;
template class SegmentPoolFunctor<GPU, double, int>;
template class SegmentPoolFunctor<GPU, double, int64_t>;
template class SegmentPoolFunctor<GPU, int, int>;
template class SegmentPoolFunctor<GPU, int, int64_t>;
template class SegmentPoolFunctor<GPU, int64_t, int>;
template class SegmentPoolFunctor<GPU, int64_t, int64_t>;
template class SegmentPoolFunctor<GPU, float16, int>;
template class SegmentPoolFunctor<GPU, float16, int64_t>;
template class SegmentPoolFunctor<GPU, phi::dtype::bfloat16, int>;
template class SegmentPoolFunctor<GPU, phi::dtype::bfloat16, int64_t>;
template class SegmentPoolGradFunctor<GPU, float, int>;
template class SegmentPoolGradFunctor<GPU, float, int64_t>;
template class SegmentPoolGradFunctor<GPU, double, int>;
template class SegmentPoolGradFunctor<GPU, double, int64_t>;
template class SegmentPoolGradFunctor<GPU, int, int>;
template class SegmentPoolGradFunctor<GPU, int, int64_t>;
template class SegmentPoolGradFunctor<GPU, int64_t, int>;
template class SegmentPoolGradFunctor<GPU, int64_t, int64_t>;
template class SegmentPoolGradFunctor<GPU, float16, int>;
template class SegmentPoolGradFunctor<GPU, float16, int64_t>;
template class SegmentPoolGradFunctor<GPU, phi::dtype::bfloat16, int>;
template class SegmentPoolGradFunctor<GPU, phi::dtype::bfloat16, int64_t>;
} // namespace funcs
} // namespace phi
| a7d8fc13266bf4a7cca062f5d4b0a6d9b0b329dc.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/segment_pooling.h"
namespace phi {
namespace funcs {
using Tensor = DenseTensor;
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentSumIdsKernel(const Index* segment_ids,
T* summed_ids,
const Index input_length_size,
const Index total_stripe_count) {
CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
const Index segment_offset = stripe_index;
const Index dim_index_base = stripe_index * Index(DimTileSize);
const Index actual_height =
min(Index(DimTileSize), input_length_size - dim_index_base);
Index first_segment_id = segment_ids[dim_index_base];
Index last_segment_id = -1;
if (dim_index_base > 0) {
last_segment_id = segment_ids[dim_index_base - 1];
}
T sum = T(0);
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
PADDLE_ENFORCE(current_segment_id >= last_segment_id,
"the segment ids should be sorted, but got "
"segment_ids[%d]:%d > segment_ids[%d]:%d.",
dim_index_base + j - 1,
dim_index_base + j,
last_segment_id,
current_segment_id);
if (current_segment_id > last_segment_id) {
for (Index interval_id = last_segment_id + 1;
interval_id < current_segment_id;
++interval_id) {
*(summed_ids + interval_id) = 0;
}
if (j > 0) {
if (last_segment_id == first_segment_id) {
phi::CudaAtomicAdd(summed_ids + last_segment_id, sum);
} else {
*(summed_ids + last_segment_id) = sum;
}
sum = T(0);
}
}
sum += T(1);
last_segment_id = current_segment_id;
}
phi::CudaAtomicAdd(summed_ids + last_segment_id, sum);
}
}
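// Illustrative note (not part of the original file): SegmentSumIdsKernel counts how
// many rows map to each (sorted) segment id and writes the counts to summed_ids,
// zero-filling skipped ids; the counts are later used as the divisor for MEAN
// pooling. A hedged, demonstration-only serial equivalent of the counting step:
#if 0
// e.g. segment_ids = {0, 0, 1, 1, 1, 3} with 4 output segments
// produces summed_ids = {2, 3, 0, 1}.
void CountSegmentIdsReference(const int64_t* segment_ids, int n,
                              float* summed_ids, int num_segments) {
  for (int k = 0; k < num_segments; ++k) summed_ids[k] = 0.f;
  for (int i = 0; i < n; ++i) summed_ids[segment_ids[i]] += 1.f;
}
#endif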
template <typename T, typename Index, int DimTileSize>
__global__ void SegmentMeanKernel(const Index* segment_ids,
const T* input,
T* output,
T* summed_ids,
const Index input_length_size,
const Index inner_dim_size,
const Index output_length_size,
const Index total_stripe_count) {
CUDA_KERNEL_LOOP(stripe_index, total_stripe_count) {
const Index segment_offset = stripe_index % inner_dim_size;
const Index dim_index_base =
stripe_index / inner_dim_size * Index(DimTileSize);
const Index actual_height =
min(Index(DimTileSize), input_length_size - dim_index_base);
Index first_segment_id = segment_ids[dim_index_base];
Index last_segment_id = -1;
if (dim_index_base > 0) {
last_segment_id = segment_ids[dim_index_base - 1];
}
T sum = T(0);
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
if (current_segment_id > last_segment_id) {
// reset the interval values which do not have corresponding ids.
for (Index interval_id = last_segment_id + 1;
interval_id < current_segment_id;
++interval_id) {
*(output + interval_id * inner_dim_size + segment_offset) = T(0);
}
if (j > 0) {
Index output_index =
last_segment_id * inner_dim_size + segment_offset;
if (last_segment_id == first_segment_id) {
phi::CudaAtomicAdd(output + output_index,
sum / *(summed_ids + last_segment_id));
} else {
*(output + output_index) = sum / *(summed_ids + last_segment_id);
}
sum = T(0);
}
}
sum += input[(dim_index_base + j) * inner_dim_size + segment_offset];
last_segment_id = current_segment_id;
}
Index output_index = last_segment_id * inner_dim_size + segment_offset;
phi::CudaAtomicAdd(output + output_index,
sum / *(summed_ids + last_segment_id));
}
}
template <typename T, typename Index, typename Helper, typename Pool>
__global__ void __launch_bounds__(1024, 1) SegmentOpsKernel(
const Index* segment_ids, const T* input, T* output, Helper h, Pool pool) {
CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
Index segment_offset, dim_index_base, actual_height;
Index inner_dim_size = h.inner_dim_size;
h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
T minmax = pool.initial();
Index first_segment_id = segment_ids[dim_index_base];
// -1 is for the start value when interval_id = 0
Index last_segment_id = -1;
if (dim_index_base > 0) {
last_segment_id = segment_ids[dim_index_base - 1];
}
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
// ensure the segment_ids are sorted.
PADDLE_ENFORCE(current_segment_id >= last_segment_id,
"The segment ids should be sorted, but got "
"segment_ids[%d]:%d > segment_ids[%d]:%d.",
dim_index_base + j - 1,
dim_index_base + j,
last_segment_id,
current_segment_id);
if (current_segment_id > last_segment_id) {
// reset the interval values which do not have corresponding ids.
for (Index interval_id = last_segment_id + 1;
interval_id < current_segment_id;
++interval_id) {
*(output + interval_id * inner_dim_size + segment_offset) = T(0);
}
// don't update result when j=0
if (j > 0) {
const Index output_index =
last_segment_id * inner_dim_size + segment_offset;
if (last_segment_id == first_segment_id) {
pool.atomic(output + output_index, minmax);
} else {
*(output + output_index) = minmax;
}
minmax = pool.initial();
}
}
pool.compute(
input[(dim_index_base + j) * inner_dim_size + segment_offset],
&minmax);
last_segment_id = current_segment_id;
}
const Index output_index =
last_segment_id * inner_dim_size + segment_offset;
pool.atomic(output + output_index, minmax);
}
}
template <typename T, typename Index, typename Helper>
__global__ void SegmentIndexGradKernel(const Index* segment_ids,
const T* input,
const T* output,
const T* out_grad,
T* in_grad,
Helper h) {
CUDA_KERNEL_LOOP(stripe_index, h.total_stripe_count) {
Index segment_offset, dim_index_base, actual_height;
h.calculate(stripe_index, &segment_offset, &dim_index_base, &actual_height);
for (Index j = 0; j < actual_height; j++) {
Index current_segment_id = segment_ids[dim_index_base + j];
Index input_index =
(dim_index_base + j) * h.inner_dim_size + segment_offset;
Index output_index =
current_segment_id * h.inner_dim_size + segment_offset;
if (input[input_index] == output[output_index]) {
in_grad[input_index] = out_grad[output_index];
}
}
}
}
template <class T>
class MaxPool {
public:
DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
DEVICE inline void compute(const T& x, T* y) { *y = *y > x ? *y : x; }
DEVICE inline T atomic(T* address, const T val) {
return phi::CudaAtomicMax(address, val);
}
};
template <class T>
class MinPool {
public:
DEVICE inline T initial() { return static_cast<T>(FLT_MAX); }
DEVICE inline void compute(const T& x, T* y) { *y = *y < x ? *y : x; }
DEVICE inline T atomic(T* address, const T val) {
return phi::CudaAtomicMin(address, val);
}
};
template <class T>
class SumPool {
public:
DEVICE inline T initial() { return static_cast<T>(0); }
DEVICE inline void compute(const T& x, T* y) { *y = *y + x; }
DEVICE inline T atomic(T* address, const T val) {
return phi::CudaAtomicAdd(address, val);
}
};
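// Illustrative note (not part of the original file): MaxPool, MinPool and SumPool all
// expose the same three-method contract consumed by SegmentOpsKernel -- initial()
// returns the identity element, compute() folds one value into the running result,
// and atomic() merges a tile-local result into global memory. A hedged host-side
// sketch of that contract (demonstration-only name; serial code needs no atomics):
#if 0
template <typename T, typename Pool>
T FoldSegmentReference(const T* values, int n, Pool pool) {
  T acc = pool.initial();
  for (int i = 0; i < n; ++i) {
    pool.compute(values[i], &acc);
  }
  return acc;  // the kernel instead calls pool.atomic(output + output_index, acc)
}
#endif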
template <class T>
class ArrangeHelper {
public:
const T input_total_size;
const T input_length_size;
const T output_length_size;
T inner_dim_size;
T total_stripe_count;
const T DimTileSize = 8;
ArrangeHelper(T a, T b, T c)
: input_total_size(a), input_length_size(b), output_length_size(c) {
T input_outer_dim_num_stripe =
(input_length_size + DimTileSize - 1) / DimTileSize;
inner_dim_size = input_total_size / input_length_size;
total_stripe_count = inner_dim_size * input_outer_dim_num_stripe;
}
DEVICE inline void calculate(T stripe_index,
T* segment_offset,
T* dim_index_base,
T* actual_height) {
*segment_offset = stripe_index % inner_dim_size;
*dim_index_base = stripe_index / inner_dim_size * DimTileSize;
*actual_height = min(DimTileSize, input_length_size - *dim_index_base);
}
};
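// Illustrative note (not part of the original file): ArrangeHelper splits the work
// into stripes of DimTileSize = 8 consecutive rows per inner-dimension element. As a
// hedged, demonstration-only example, for an input of shape [100, 256] (so
// input_total_size = 25600, input_length_size = 100) inner_dim_size is
// 25600 / 100 = 256, the 100 rows form (100 + 8 - 1) / 8 = 13 tiles, and
// total_stripe_count is 256 * 13 = 3328, i.e. one kernel stripe per (tile, column):
#if 0
ArrangeHelper<int64_t> h(/*input_total_size=*/25600,
                         /*input_length_size=*/100,
                         /*output_length_size=*/10);
// h.inner_dim_size == 256, h.total_stripe_count == 3328
#endif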
template <typename T, typename Index>
void SegmentPoolCUDAGradFunctor(const phi::GPUContext& ctx,
const DenseTensor& input,
const DenseTensor& segment_ids,
const DenseTensor& output,
const DenseTensor& out_grad,
DenseTensor* in_grad,
const std::string pooltype = "SUM") {
auto h = ArrangeHelper<Index>(
input.numel(), segment_ids.dims()[0], output.dims()[0]);
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(ctx, h.total_stripe_count);
if (pooltype == "MAX" || pooltype == "MIN") {
SegmentIndexGradKernel<T, Index, ArrangeHelper<Index>>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
ctx.stream()>>>(segment_ids.data<Index>(),
input.data<T>(),
output.data<T>(),
out_grad.data<T>(),
in_grad->data<T>(),
h);
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Unsupported segment pooling grad operation, Only MAX, MIN "
"available, but got %s.",
pooltype));
}
}
template <typename T>
__global__ void SimpleDiv(T* x, const T* y, const int len, const int dim) {
for (int i = blockIdx.x; i < len; i += gridDim.x) {
__shared__ T y_i;
auto base = i * dim;
if (threadIdx.x == 0) {
y_i = y[i];
}
__syncthreads();
for (int j = threadIdx.x; j < dim; j += blockDim.x) {
x[base + j] /= y_i;
}
}
}
template <typename T, typename IndexT>
class SegmentPoolFunctor<phi::GPUContext, T, IndexT> {
public:
void operator()(const phi::GPUContext& ctx,
const DenseTensor& input,
const DenseTensor& segment_ids,
DenseTensor* output,
DenseTensor* summed_ids = nullptr,
const std::string pooltype = "SUM") {
if (pooltype == "MEAN") {
// Sum the segment id num first
IndexT DimTileSize = 8;
auto input_length_size = segment_ids.numel();
auto total_stripe_count =
(input_length_size + DimTileSize - 1) / DimTileSize;
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(ctx, total_stripe_count);
SegmentSumIdsKernel<T, IndexT, IndexT(8)>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
ctx.stream()>>>(segment_ids.data<IndexT>(),
summed_ids->data<T>(),
input_length_size,
total_stripe_count);
}
auto h = ArrangeHelper<IndexT>(
input.numel(), segment_ids.dims()[0], output->dims()[0]);
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(ctx, h.total_stripe_count);
if (pooltype == "MEAN") {
SegmentMeanKernel<T, IndexT, IndexT(8)>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
ctx.stream()>>>(segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
summed_ids->data<T>(),
h.input_length_size,
h.inner_dim_size,
h.output_length_size,
h.total_stripe_count);
} else if (pooltype == "SUM") {
SumPool<T> pool;
SegmentOpsKernel<T, IndexT, ArrangeHelper<IndexT>, SumPool<T>>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
ctx.stream()>>>(segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
h,
pool);
} else if (pooltype == "MAX") {
MaxPool<T> pool;
SegmentOpsKernel<T, IndexT, ArrangeHelper<IndexT>, MaxPool<T>>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
ctx.stream()>>>(segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
h,
pool);
} else if (pooltype == "MIN") {
MinPool<T> pool;
SegmentOpsKernel<T, IndexT, ArrangeHelper<IndexT>, MinPool<T>>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
ctx.stream()>>>(segment_ids.data<IndexT>(),
input.data<T>(),
output->data<T>(),
h,
pool);
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
"available, but got %s.",
pooltype));
}
}
};
template <typename T, typename IndexT>
class SegmentPoolGradFunctor<phi::GPUContext, T, IndexT> {
public:
void operator()(const phi::GPUContext& dev_ctx,
const DenseTensor& input,
const DenseTensor& output,
const DenseTensor& out_grad,
const DenseTensor& segments,
DenseTensor* in_grad,
const paddle::optional<DenseTensor>& summed_ids,
const std::string pooltype = "SUM") {
if (pooltype == "MAX" || pooltype == "MIN") {
SegmentPoolCUDAGradFunctor<T, IndexT>(
dev_ctx, input, segments, output, out_grad, in_grad, pooltype);
} else if (pooltype == "MEAN") {
DenseTensor mean_grad;
mean_grad.Resize(input.dims());
dev_ctx.template Alloc<T>(&mean_grad);
phi::Copy(dev_ctx, out_grad, dev_ctx.GetPlace(), false, &mean_grad);
int len = output.dims()[0];
int dim = output.numel() / len;
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, len);
SimpleDiv<T><<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(
mean_grad.data<T>(), summed_ids->data<T>(), len, dim);
phi::funcs::GPUGather<T, IndexT>(dev_ctx, mean_grad, segments, in_grad);
} else if (pooltype == "SUM") {
phi::funcs::GPUGather<T, IndexT>(dev_ctx, out_grad, segments, in_grad);
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Unsupported segment pooling operation, Only MEAN, SUM, MAX, MIN "
"available, but got %s.",
pooltype));
}
}
};
using GPU = phi::GPUContext;
using float16 = phi::dtype::float16;
template class SegmentPoolFunctor<GPU, float, int>;
template class SegmentPoolFunctor<GPU, float, int64_t>;
template class SegmentPoolFunctor<GPU, double, int>;
template class SegmentPoolFunctor<GPU, double, int64_t>;
template class SegmentPoolFunctor<GPU, int, int>;
template class SegmentPoolFunctor<GPU, int, int64_t>;
template class SegmentPoolFunctor<GPU, int64_t, int>;
template class SegmentPoolFunctor<GPU, int64_t, int64_t>;
template class SegmentPoolFunctor<GPU, float16, int>;
template class SegmentPoolFunctor<GPU, float16, int64_t>;
template class SegmentPoolFunctor<GPU, phi::dtype::bfloat16, int>;
template class SegmentPoolFunctor<GPU, phi::dtype::bfloat16, int64_t>;
template class SegmentPoolGradFunctor<GPU, float, int>;
template class SegmentPoolGradFunctor<GPU, float, int64_t>;
template class SegmentPoolGradFunctor<GPU, double, int>;
template class SegmentPoolGradFunctor<GPU, double, int64_t>;
template class SegmentPoolGradFunctor<GPU, int, int>;
template class SegmentPoolGradFunctor<GPU, int, int64_t>;
template class SegmentPoolGradFunctor<GPU, int64_t, int>;
template class SegmentPoolGradFunctor<GPU, int64_t, int64_t>;
template class SegmentPoolGradFunctor<GPU, float16, int>;
template class SegmentPoolGradFunctor<GPU, float16, int64_t>;
template class SegmentPoolGradFunctor<GPU, phi::dtype::bfloat16, int>;
template class SegmentPoolGradFunctor<GPU, phi::dtype::bfloat16, int64_t>;
} // namespace funcs
} // namespace phi
|
b8ddd550f2f3e0a5bcf15d70c2ee33fc0ec27384.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection2.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
// this definition must go here.
void CreateTexture2(int num_devices, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate);
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 7 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec, source
// So we need to keep PROJ_PER_KERNEL*7 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Now we also need to store sinAlpha, cosAlpha, COR, DSD and DSO for each projection (five floats per projection)
__constant__ float projSinCosArray2Dev[5*PROJ_PER_KERNEL];
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
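// Illustrative note (not part of the original file): the pattern the constant arrays
// above enable is, per kernel call, (1) fill the host-side parameter arrays for up to
// PROJ_PER_KERNEL projections, (2) copy them to constant memory, (3) launch the
// backprojection kernel. A hedged, demonstration-only sketch of steps (2) and (3),
// assuming the host arrays are already filled (the real loop further down also
// handles multi-GPU and image splits plus bounds checks):
#if 0
static void uploadParamsAndLaunch(const Geometry geo, float* dimage, int kernelCallIdx,
                                  int nProjInThisCall, hipTextureObject_t texProj,
                                  Point3D* projParamsArray2Host, float* projSinCosArray2Host,
                                  dim3 grid, dim3 block, hipStream_t stream) {
    hipMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host,
                           sizeof(float)*5*PROJ_PER_KERNEL, 0, hipMemcpyHostToDevice, stream);
    hipMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host,
                           sizeof(Point3D)*7*PROJ_PER_KERNEL, 0, hipMemcpyHostToDevice, stream);
    hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid), dim3(block), 0, stream,
                       geo, dimage, kernelCallIdx, nProjInThisCall, texProj);
}
#endif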
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex)
{
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 7*projNumber because we have 7 Point3D values per projection
Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
Point3D S = projParamsArray2Dev[7*projNumber+6];
float sinalpha = projSinCosArray2Dev[5*projNumber]; // 5*projNumber because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
float cosalpha = projSinCosArray2Dev[5*projNumber+1];
float COR = projSinCosArray2Dev[5*projNumber+2];
float DSD = projSinCosArray2Dev[5*projNumber+3];
float DSO = projSinCosArray2Dev[5*projNumber+4];
// Precomputations for the weights:
//Real coords of Source
// We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate
Point3D realS;
realS.x= DSO*cosalpha;
realS.y=-DSO*sinalpha;
realS.z=0;
Point3D realvoxel_init;
realvoxel_init.x=-geo.sVoxelX/2+geo.dVoxelX/2+xyzOffset.x;
realvoxel_init.y=-geo.sVoxelY/2+geo.dVoxelY/2+xyzOffset.y;
realvoxel_init.z=-geo.sVoxelZ/2+geo.dVoxelZ/2+xyzOffset.z;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(DSD-DSO);
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
float sample=tex3D<float>(tex, v, u ,indAlpha+0.5f);
float weigth=0;
//
//
//
// IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up??
//
//Real coordinates of Voxel. Instead of reverting the transformation, it's less math (faster) to compute it from the indexes.
Point3D realvoxel;
realvoxel.x=realvoxel_init.x+indX*geo.dVoxelX;
realvoxel.y=realvoxel_init.y+indY*geo.dVoxelY;
realvoxel.z=realvoxel_init.z+indZ*geo.dVoxelZ;
realDaux.y=(-geo.sDetecU+geo.dDetecU)*0.5f + u*geo.dDetecU +uv0Offset.x;
realD.z =(-geo.sDetecV+geo.dDetecV)*0.5f + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,lsq;
L = __fsqrt_rd( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
lsq = (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
+ (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
+ (realS.z-realvoxel.z)*(realS.z-realvoxel.z);
weigth=__fdividef(L*L*L,(DSD*lsq));
// weigth=1;
// Get Value in the computed (U,V) and multiply by the corresponding weight.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=sample* weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated voxel value (accumulated over MULTIPLE projections) back to the main volume
// We did all the updates on the local (register) variable, avoiding reads/writes from the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection2(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
}
// Check the available devices, and if they are the same
int dev;
checkDevices();
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(deviceCount,geo,nalpha,&split_image,&split_projections);
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now lest allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMalloc((void**)&dimage[dev], num_bytes_img);
cudaCheckErrors("hipMalloc fail");
}
//Pagelock memory for synchronous copy.
// Let's try to make the host memory pinned:
// We already queried the GPU and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
// empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to
// pin the memory is greater than the lost time in Synchronously launching the memcpys. This is only worth it when the image is too big.
if (isHostRegisterSupported & split_image>1){
hipHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable);
}
if (isHostRegisterSupported ){
hipHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
//If it is the first time, lets make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStreamDevice; ++i){
hipStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArray2Host;
hipHostMalloc((void**)&projParamsArray2Host,7*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArray2Host;
hipHostMalloc((void**)&projSinCosArray2Host,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
hipTextureObject_t *texProj;
hipArray **d_cuArrTex;
texProj =(hipTextureObject_t*)malloc(deviceCount*2*sizeof(hipTextureObject_t));
d_cuArrTex =(hipArray**)malloc(deviceCount*2*sizeof(hipArray*));
unsigned int proj_split_overlap_number;
// Start with the main loop. The Projection data needs to be allocated and deallocated in the main loop
// as due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very very big,
// and therefore allocation time should be negligible; fluctuation of other computations should mask the time.
unsigned long long proj_linear_idx_start;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
//
// Initialize the memory if its the first time.
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemset(dimage[dev],0,num_bytes_img);
cudaCheckErrors("memset fail");
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if its the last one its probably less
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t));
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it's likely not completely divisible.
// now let's split this for simultaneous memcpy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture2(deviceCount,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2)&!proj&!img_slice);// Only allocate if it's the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, the case where an image slice has zero height can happen.
// Just break the loop if we reached that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
hipSetDevice(dev);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArray2Host[5*j]=sinalpha; // 5*j because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
projSinCosArray2Host[5*j+1]=cosalpha;
projSinCosArray2Host[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArray2Host[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArray2Host[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
offDetec.x=geo.offDetecU[currProjNumber_global];
offDetec.y=geo.offDetecV[currProjNumber_global];
offDetec.z=0;//unused
projParamsArray2Host[7*j] =deltaX; // 7*j because we have 7 Point3D values per projection
projParamsArray2Host[7*j+1]=deltaY;
projParamsArray2Host[7*j+2]=deltaZ;
projParamsArray2Host[7*j+3]=xyzOrigin;
projParamsArray2Host[7*j+4]=offOrig;
projParamsArray2Host[7*j+5]=offDetec;
projParamsArray2Host[7*j+6]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
hipMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*5*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipStreamSynchronize(stream[dev*nStreamDevice]);
hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}
} // END sub-split of current projection chunk
} // END projection splits
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipLaunchKernelGGL(( matrixConstantMultiply), dim3(60),dim3(MAXTREADS),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
}
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStreamDevice]);
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
hipMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, hipMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
} // end image splits
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
// Clean the GPU
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDestroyTextureObject(texProj[i*deviceCount+dev]);
hipFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipFree(dimage[dev]);
}
hipHostFree(projSinCosArray2Host);
hipHostFree(projParamsArray2Host);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported & split_image>1){
hipHostUnregister(result);
}
if (isHostRegisterSupported){
hipHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]);
cudaCheckErrors("hipFree fail");
// hipDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
void CreateTexture2(int num_devices, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const hipExtent extent =make_hipExtent(geo.nDetecV, geo.nDetecU, nangles);
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(dev);
//hipArray Descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
//cuda Array
hipMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(dev);
hipMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_hipPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(dev);
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModeLinear;
texDescr.addressMode[0] = hipAddressModeBorder;
texDescr.addressMode[1] = hipAddressModeBorder;
texDescr.addressMode[2] = hipAddressModeBorder;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
#ifndef BACKPROJECTION_HPP
void checkDevices(void){
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
const int devicenamelength = 256; // The length 256 is fixed by spec of hipDeviceProp_t::name
char devicename[devicenamelength];
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Atb:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
}
void splitCTbackprojection(int deviceCount,Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used. Let's check that and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// We only need to split if we have extra GPUs
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does all the image fit in the GPU, with some slack for a stack of projections??
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it's in the order of bytes, and we have 5% of GPU free just in case. We are safe
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
}
}
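// Illustrative note (not part of the original file): a hedged, demonstration-only
// walk-through of the split logic above. Suppose a single GPU reports
// mem_GPU_global = 4096 MB, the volume needs mem_image = 6144 MB, one projection
// needs mem_proj = 1 MB and PROJ_PER_KERNEL = 32. The volume does not fit, so
// mem_free = 4096 - 32 = 4064 MB and split_image = ceil(6144 / 4064) = 2; each image
// half then occupies 3072 MB, leaving about 1024 MB free, which easily holds the
// 2 * 32 MB of projection buffers, so split_projections stays 1.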
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image around X axis (this is equivalent to rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif | b8ddd550f2f3e0a5bcf15d70c2ee33fc0ec27384.cu | /*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection2.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
// this definition must go here.
void CreateTexture2(int num_devices, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate);
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 7 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec, source
// So we need to keep PROJ_PER_KERNEL*7 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Now we also need to store sinAlpha, cosAlpha, COR, DSD and DSO for each projection (five floats per projection)
__constant__ float projSinCosArray2Dev[5*PROJ_PER_KERNEL];
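// Per-projection layout: projParamsArray2Dev holds 7 Point3D entries (deltaX, deltaY, deltaZ, xyzOrigin,
// offOrig, offDetec, source) and projSinCosArray2Dev holds 5 floats (sin(alpha), cos(alpha), COR, DSD, DSO).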
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 7*projNumber because we have 7 Point3D values per projection
Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
Point3D S = projParamsArray2Dev[7*projNumber+6];
float sinalpha = projSinCosArray2Dev[5*projNumber]; // 5*projNumber because we store 5 floats (sin, cos, COR, DSD, DSO) per projection
float cosalpha = projSinCosArray2Dev[5*projNumber+1];
float COR = projSinCosArray2Dev[5*projNumber+2];
float DSD = projSinCosArray2Dev[5*projNumber+3];
float DSO = projSinCosArray2Dev[5*projNumber+4];
// Precomputations for the weights:
//Real coords of Source
// We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate
Point3D realS;
realS.x= DSO*cosalpha;
realS.y=-DSO*sinalpha;
realS.z=0;
Point3D realvoxel_init;
realvoxel_init.x=-geo.sVoxelX/2+geo.dVoxelX/2+xyzOffset.x;
realvoxel_init.y=-geo.sVoxelY/2+geo.dVoxelY/2+xyzOffset.y;
realvoxel_init.z=-geo.sVoxelZ/2+geo.dVoxelZ/2+xyzOffset.z;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(DSD-DSO);
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
float sample=tex3D<float>(tex, v, u ,indAlpha+0.5f);
float weigth=0;
//
//
//
// IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up??
//
//Real coordinates of Voxel. Instead of reverting the transformation, it's less math (faster) to compute it from the indexes.
Point3D realvoxel;
realvoxel.x=realvoxel_init.x+indX*geo.dVoxelX;
realvoxel.y=realvoxel_init.y+indY*geo.dVoxelY;
realvoxel.z=realvoxel_init.z+indZ*geo.dVoxelZ;
realDaux.y=(-geo.sDetecU+geo.dDetecU)*0.5f + u*geo.dDetecU +uv0Offset.x;
realD.z =(-geo.sDetecV+geo.dDetecV)*0.5f + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,lsq;
L = __fsqrt_rd( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
lsq = (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
+ (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
+ (realS.z-realvoxel.z)*(realS.z-realvoxel.z);
weigth=__fdividef(L*L*L,(DSD*lsq));
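// The line above computes weigth = L^3 / (DSD * lsq): distance weighting for the backprojection, with L the
// source-to-detector-pixel distance and lsq the squared source-to-voxel distance of the current ray.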
// weigth=1;
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=sample* weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the voxel value, updated from MULTIPLE projections, back to the 3D volume.
// Until now we only touched the local (register) array, avoiding reads/writes from the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection2(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
}
// Check the available devices, and if they are the same
int dev;
checkDevices();
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(deviceCount,geo,nalpha,&split_image,&split_projections);
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now lest allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMalloc((void**)&dimage[dev], num_bytes_img);
cudaCheckErrors("cudaMalloc fail");
}
//Pagelock memory for synchronous copy.
// Lets try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
// Empirical testing shows that when the image does not need to be split (which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & split_image>1){
cudaHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
}
if (isHostRegisterSupported ){
cudaHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
//If it is the first time, lets make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStreamDevice; ++i){
cudaStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArray2Host;
cudaMallocHost((void**)&projParamsArray2Host,7*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArray2Host;
cudaMallocHost((void**)&projSinCosArray2Host,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
cudaTextureObject_t *texProj;
cudaArray **d_cuArrTex;
texProj =(cudaTextureObject_t*)malloc(deviceCount*2*sizeof(cudaTextureObject_t));
d_cuArrTex =(cudaArray**)malloc(deviceCount*2*sizeof(cudaArray*));
unsigned int proj_split_overlap_number;
// Start with the main loop. The projection data needs to be allocated and deallocated in the main loop
// as, due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very very big,
// and therefore allocation time should be negligible; fluctuation of other computations should mask the time.
unsigned long long proj_linear_idx_start;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
//
// Initialize the memory if its the first time.
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemset(dimage[dev],0,num_bytes_img);
cudaCheckErrors("memset fail");
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if its the last one its probably less
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
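// The expression above is integer ceil division: the number of PROJ_PER_KERNEL-sized sub-chunks needed to cover this split.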
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t*));
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it is likely not completely divisible.
// Now let's split this for simultaneous memcopy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture2(deviceCount,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2)&!proj&!img_slice);// Only allocate if its the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, the case where an image slice has zero height can happen.
// Just break the loop if we reached that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
cudaSetDevice(dev);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArray2Host[5*j]=sinalpha; // 5*j because we store 5 floats (sin, cos, COR, DSD, DSO) per projection
projSinCosArray2Host[5*j+1]=cosalpha;
projSinCosArray2Host[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArray2Host[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArray2Host[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
offDetec.x=geo.offDetecU[currProjNumber_global];
offDetec.y=geo.offDetecV[currProjNumber_global];
offDetec.z=0;//unused
projParamsArray2Host[7*j] =deltaX; // 7*j because we have 7 Point3D values per projection
projParamsArray2Host[7*j+1]=deltaY;
projParamsArray2Host[7*j+2]=deltaZ;
projParamsArray2Host[7*j+3]=xyzOrigin;
projParamsArray2Host[7*j+4]=offOrig;
projParamsArray2Host[7*j+5]=offDetec;
projParamsArray2Host[7*j+6]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
cudaMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*5*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
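// The synchronization above also means the pinned host parameter arrays can be safely refilled on the next
// iteration, since the asynchronous copies above read from them.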
kernelPixelBackprojection<<<grid,block,0,stream[dev*nStreamDevice]>>>(geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}
} // END sub-split of current projection chunk
} // END projection splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
matrixConstantMultiply<<<60,MAXTREADS,0,stream[dev*nStreamDevice]>>>( geoArray[img_slice*deviceCount+dev],dimage[dev],geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
}
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
cudaMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, cudaMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
} // end image splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
// Clean the GPU
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDestroyTextureObject(texProj[i*deviceCount+dev]);
cudaFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaFree(dimage[dev]);
}
cudaFreeHost(projSinCosArray2Host);
cudaFreeHost(projParamsArray2Host);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported & split_image>1){
cudaHostUnregister(result);
}
if (isHostRegisterSupported){
cudaHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]);
cudaCheckErrors("cudaFree fail");
// cudaDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
void CreateTexture2(int num_devices, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const cudaExtent extent =make_cudaExtent(geo.nDetecV, geo.nDetecU, nangles);
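// Note the (detector V, detector U, angle) layout, matching the kernel's tex3D<float>(tex, v, u, angle) fetches.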
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(dev);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(dev);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(dev);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
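// Linear filtering gives hardware-interpolated reads of the projections; border addressing returns 0 outside the detector.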
cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
#ifndef BACKPROJECTION_HPP
void checkDevices(void){
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
const int devicenamelength = 256; // The length 256 is fixed by spec of cudaDeviceProp::name
char devicename[devicenamelength];
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Atb:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
}
void splitCTbackprojection(int deviceCount,Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// We only need to split if we have extra GPUs
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does all the image fit in the GPU, with some slack for a stack of projections??
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it's in the order of bytes, and we have 5% of GPU free just in case. We are safe
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
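// The factor 2 above accounts for the two (double-buffered) stacks of PROJ_PER_KERNEL projection textures kept alive at once.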
}
}
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coors from next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image around X axis (this is equivalent of rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //allready offseted for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
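// rollPitchYawT: rotate a point in place by the detector roll/pitch/yaw angles (the T suffix indicates the transposed rotation).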
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif |
d9d13319499306f0b9f2c0b0b9f3becd5b03f663.hip | // !!! This is a file automatically generated by hipify!!!
//Darrien Park
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
const size_t w = 200;
//kernel functions to be called from the host and executed in the gpu
//produces one output matrix element per thread
__global__ void MatrixAddKernel(float* a, float *b, float *sum, int width){
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if (row < width && col < width){ //only threads within range
sum[row*width + col] = a[row*width + col] + b[row*width + col];
}
}
//produces one output matrix row per thread
__global__ void MatrixAddRow(float* a, float *b, float *sum, int width){
int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row < width){ //only threads within range
int j;
for (j = 0; j < width; j++)
sum[row*width + j] = a[row*width + j] + b[row*width + j];
}
}
//produces one output matrix row per thread
__global__ void MatrixAddCol(float* a, float *b, float *sum, int width){
int col = blockIdx.x*blockDim.x + threadIdx.x;
if (col < width){ //only threads within range
for (int i = 0; i < width; i++)
sum[col + width*i] = a[col + width* i] + b[col + width * i];
}
}
//define a new type for matrix so that matrices can be stored on the heap; execution will not crash on large matrix sizes
typedef float squareMatrix[w];
//function to check if the resultant matrix from the CPU is the same as the GPU
void correct_output(squareMatrix *CPUout, squareMatrix *GPUout, int width){
for (int i = 0; i < width; i++)
for (int j = 0; j < width; j++){
if (CPUout[i][j] != GPUout[i][j])
printf("TEST FAILED\n");
}
printf("TEST PASSED\n");
}
int main(){
//define and initialize variables, allocate memory in heap for matrices
int size = w*w*sizeof(float);
squareMatrix *a, *b, *GPUsum, *CPUsum;
a = (squareMatrix *)malloc(size);
b = (squareMatrix *)malloc(size);
GPUsum = (squareMatrix *)malloc(size);
CPUsum = (squareMatrix *)malloc(size);
//populate the matrix with random numbers between 0 and 10 to read output easily
srand(time(NULL));
for(int i =0; i<w; i++)
for(int j=0;j<w;j++){
a[i][j] = rand() % (10 + 1 - 0) + 0;
b[i][j] = rand() % (10 + 1 - 0) + 0;
}
//find number of blocks required which is width = width of matrix/block size
int NumBlocks = w/BLOCK_SIZE;
//if remainder, extra block is needed
if(w % BLOCK_SIZE) NumBlocks++;
//set grid dimensions
dim3 dimGrid(NumBlocks,NumBlocks); //for 16x16 parameters are (NumBlocks, Numblocks), for 16 block/thread (Numblocks)
//set block dimensions 16x16
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); //for 16x16 (BLOCK_SIZE, BLOCK_SIZE)
float *d_a, *d_b, *d_sum;
//allocate host memory onto device
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_sum, size);
hipMemcpyAsync(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpyAsync(d_b, b, size, hipMemcpyHostToDevice);
hipMemcpyAsync(d_sum, GPUsum, size, hipMemcpyHostToDevice);
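// Note: a, b and GPUsum come from plain malloc (pageable memory), so these hipMemcpyAsync calls cannot fully
// overlap with host work; truly asynchronous copies would require pinned memory (e.g. hipHostMalloc).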
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
float gpu_time = 0.0f;
//record gpu calculation time
hipEventRecord(start,0);
hipLaunchKernelGGL(( MatrixAddKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_b, d_sum, w); //change kernel name to compare performance
hipEventRecord(stop,0);
hipMemcpy(GPUsum, d_sum, size, hipMemcpyDeviceToHost);
//CPU calculation runs asynchronously with GPU
hipEvent_t CPUstart, CPUstop;
hipEventCreate(&CPUstart);
hipEventCreate(&CPUstop);
hipEventRecord(CPUstart);
for(int i = 0; i < w; i++)
for(int j =0; j<w; j++){
CPUsum[i][j] = a[i][j]+b[i][j];
}
hipEventRecord(CPUstop);
hipEventSynchronize(CPUstop);
float cpu_time = 0.0f;
hipEventElapsedTime(&cpu_time, CPUstart, CPUstop);
printf("Time spent executing bv the CPU: %.2f\n",cpu_time);
unsigned long int counter = 0;
while(hipEventQuery(stop) == hipErrorNotReady){
counter ++;
}
hipEventElapsedTime(&gpu_time,start,stop);
printf("Time spent executing bv the GPU: %.2f\n",gpu_time);
correct_output(CPUsum, GPUsum, w);
//free memory space pointed to by host
free(a); // these host buffers were allocated with plain malloc, so free() (not hipHostFree) is the matching release
free(b);
free(GPUsum);
free(CPUsum);
//free memory space pointed to by device
hipFree(d_a);
hipFree(d_b);
hipFree(d_sum);
hipDeviceReset();
return 0;
}
| d9d13319499306f0b9f2c0b0b9f3becd5b03f663.cu | //Darrien Park
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
const size_t w = 200;
//kernel functions to be called from the host and executed in the gpu
//produces one output matrix element per thread
__global__ void MatrixAddKernel(float* a, float *b, float *sum, int width){
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if (row < width && col < width){ //only threads within range
sum[row*width + col] = a[row*width + col] + b[row*width + col];
}
}
//produces one output matrix row per thread
__global__ void MatrixAddRow(float* a, float *b, float *sum, int width){
int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row < width){ //only threads within range
int j;
for (j = 0; j < width; j++)
sum[row*width + j] = a[row*width + j] + b[row*width + j];
}
}
//produces one output matrix row per thread
__global__ void MatrixAddCol(float* a, float *b, float *sum, int width){
int col = blockIdx.x*blockDim.x + threadIdx.x;
if (col < width){ //only threads within range
for (int i = 0; i < width; i++)
sum[col + width*i] = a[col + width* i] + b[col + width * i];
}
}
//define a new type for matrix so that matrices can be stored on the heap; execution will not crash on large matrix sizes
typedef float squareMatrix[w];
//function to check if the resultant matrix from the CPU is the same as the GPU
void correct_output(squareMatrix *CPUout, squareMatrix *GPUout, int width){
for (int i = 0; i < width; i++)
for (int j = 0; j < width; j++){
if (CPUout[i][j] != GPUout[i][j])
printf("TEST FAILED\n");
}
printf("TEST PASSED\n");
}
int main(){
//define and initialize variables, allocate memory in heap for matrices
int size = w*w*sizeof(float);
squareMatrix *a, *b, *GPUsum, *CPUsum;
a = (squareMatrix *)malloc(size);
b = (squareMatrix *)malloc(size);
GPUsum = (squareMatrix *)malloc(size);
CPUsum = (squareMatrix *)malloc(size);
//populate the matrix with random numbers between 0 and 10 to read output easily
srand(time(NULL));
for(int i =0; i<w; i++)
for(int j=0;j<w;j++){
a[i][j] = rand() % (10 + 1 - 0) + 0;
b[i][j] = rand() % (10 + 1 - 0) + 0;
}
//find number of blocks required which is width = width of matrix/block size
int NumBlocks = w/BLOCK_SIZE;
//if remainder, extra block is needed
if(w % BLOCK_SIZE) NumBlocks++;
//set grid dimensions
dim3 dimGrid(NumBlocks,NumBlocks); //for 16x16 parameters are (NumBlocks, Numblocks), for 16 block/thread (Numblocks)
//set block dimensions 16x16
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); //for 16x16 (BLOCK_SIZE, BLOCK_SIZE)
float *d_a, *d_b, *d_sum;
//allocate host memory onto device
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_sum, size);
cudaMemcpyAsync(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpyAsync(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemcpyAsync(d_sum, GPUsum, size, cudaMemcpyHostToDevice);
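// Note: a, b and GPUsum come from plain malloc (pageable memory), so these cudaMemcpyAsync calls cannot fully
// overlap with host work; truly asynchronous copies would require pinned memory (e.g. cudaMallocHost).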
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaDeviceSynchronize();
float gpu_time = 0.0f;
//record gpu calculation time
cudaEventRecord(start,0);
MatrixAddKernel<<<dimGrid,dimBlock>>>(d_a, d_b, d_sum, w); //change kernel name to compare performance
cudaEventRecord(stop,0);
cudaMemcpy(GPUsum, d_sum, size, cudaMemcpyDeviceToHost);
//CPU calculation runs asynchronously with GPU
cudaEvent_t CPUstart, CPUstop;
cudaEventCreate(&CPUstart);
cudaEventCreate(&CPUstop);
cudaEventRecord(CPUstart);
for(int i = 0; i < w; i++)
for(int j =0; j<w; j++){
CPUsum[i][j] = a[i][j]+b[i][j];
}
cudaEventRecord(CPUstop);
cudaEventSynchronize(CPUstop);
float cpu_time = 0.0f;
cudaEventElapsedTime(&cpu_time, CPUstart, CPUstop);
printf("Time spent executing bv the CPU: %.2f\n",cpu_time);
unsigned long int counter = 0;
while(cudaEventQuery(stop) == cudaErrorNotReady){
counter ++;
}
cudaEventElapsedTime(&gpu_time,start,stop);
printf("Time spent executing bv the GPU: %.2f\n",gpu_time);
correct_output(CPUsum, GPUsum, w);
//free memory space pointed to by host
free(a); // these host buffers were allocated with plain malloc, so free() (not cudaFreeHost) is the matching release
free(b);
free(GPUsum);
free(CPUsum);
//free memory space pointed to by device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_sum);
cudaDeviceReset();
return 0;
}
|
7a365aea4c9d3d7b2bfc218f38036897720ab2ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <cutil_inline.h>
#include <shrUtils.h>
#include <shrQATest.h>
const char *sSDKsample = "concurrentKernels";
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(clock_t* d_o, clock_t clock_count)
{
clock_t start_clock = clock();
clock_t clock_offset = 0;
while( clock_offset < clock_count ) {
clock_offset = clock() - start_clock;
}
d_o[0] = clock_offset;
}
// Single warp reduction kernel
__global__ void sum(clock_t* d_clocks, int N)
{
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for( int i = threadIdx.x; i < N; i+= blockDim.x ) {
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
__syncthreads();
for( int i=16; i>0; i/=2) {
if( threadIdx.x < i ) {
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
__syncthreads();
}
d_clocks[0] = s_clocks[0];
}
int main(int argc, char **argv)
{
int nkernels = 16; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
int kernel_time = 10; // time the kernel should run in ms
int cuda_device = 0;
shrQAStart(argc, argv);
// get number of kernels if overridden on the command line
if (cutCheckCmdLineFlag(argc, (const char **)argv, "nkernels")) {
cutGetCmdLineArgumenti(argc, (const char **)argv, "nkernels", &nkernels);
nstreams = nkernels + 1;
}
// get kernel_time if overridden on the command line
if (cutCheckCmdLineFlag(argc, (const char **)argv, "kernel_time")) {
cutGetCmdLineArgumenti(argc, (const char **)argv, "kernel_time", &kernel_time);
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
cuda_device = cutilChooseCudaDevice(argc, argv);
hipDeviceProp_t deviceProp;
cutilSafeCall( hipGetDevice(&cuda_device));
cutilSafeCall( hipGetDeviceProperties(&deviceProp, cuda_device) );
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
cutilSafeCall( hipHostMalloc((void**)&a, nbytes) );
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
cutilSafeCall( hipMalloc((void**)&d_a, nbytes) );
// allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t*) malloc(nstreams * sizeof(hipStream_t));
for(int i = 0; i < nstreams; i++)
cutilSafeCall( hipStreamCreate(&(streams[i])) );
hipEvent_t *kernelEvent;
kernelEvent = (hipEvent_t*) malloc(nkernels * sizeof(hipEvent_t));
for(int i = 0; i < nkernels; i++)
cutilSafeCall( hipEventCreateWithFlags(&(kernelEvent[i]), hipEventDisableTiming) );
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
clock_t total_clocks = 0;
clock_t time_clocks = kernel_time * deviceProp.clockRate;
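// clockRate is reported in kHz, so kernel_time (ms) * clockRate (kHz) gives the number of clocks each kernel should spin for.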
//shrLog("Time Clocks time is: %d", time_clocks);
// hipEventRecord(start_event, 0);
// queue nkernels in separate streams and record when they are done
for( int i=0; i<nkernels; ++i)
{
hipLaunchKernelGGL(( clock_block), dim3(1),dim3(1),0,streams[i], &d_a[i], time_clocks );
total_clocks += time_clocks;
cutilSafeCall( hipEventRecord(kernelEvent[i], streams[i]) );
// make the last stream wait for the kernel event to be recorded
cutilSafeCall( hipStreamWaitEvent(streams[nstreams-1], kernelEvent[i],0) );
}
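// At this point the last stream waits on every kernel's completion event, so the sum kernel queued below
// cannot start until all clock_block kernels have finished.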
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events have been recorded
hipLaunchKernelGGL(( sum), dim3(1),dim3(32),0,streams[nstreams-1], d_a, nkernels);
cutilSafeCall( hipMemcpyAsync(a, d_a, sizeof(clock_t), hipMemcpyDeviceToHost, streams[nstreams-1]) );
// at this point the CPU has dispatched all work for the GPU and can continue processing other tasks in parallel
// in this sample we just wait until the GPU is done
// release resources
for(int i = 0; i < nkernels; i++) {
hipStreamDestroy(streams[i]);
hipEventDestroy(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
hipHostFree(a);
hipFree(d_a);
cutilDeviceReset();
return 0;
}
| 7a365aea4c9d3d7b2bfc218f38036897720ab2ab.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <cutil_inline.h>
#include <shrUtils.h>
#include <shrQATest.h>
const char *sSDKsample = "concurrentKernels";
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(clock_t* d_o, clock_t clock_count)
{
clock_t start_clock = clock();
clock_t clock_offset = 0;
while( clock_offset < clock_count ) {
clock_offset = clock() - start_clock;
}
d_o[0] = clock_offset;
}
// Single warp reduction kernel
__global__ void sum(clock_t* d_clocks, int N)
{
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for( int i = threadIdx.x; i < N; i+= blockDim.x ) {
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
__syncthreads();
for( int i=16; i>0; i/=2) {
if( threadIdx.x < i ) {
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
__syncthreads();
}
d_clocks[0] = s_clocks[0];
}
int main(int argc, char **argv)
{
int nkernels = 16; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
int kernel_time = 10; // time the kernel should run in ms
int cuda_device = 0;
shrQAStart(argc, argv);
// get number of kernels if overridden on the command line
if (cutCheckCmdLineFlag(argc, (const char **)argv, "nkernels")) {
cutGetCmdLineArgumenti(argc, (const char **)argv, "nkernels", &nkernels);
nstreams = nkernels + 1;
}
// get kernel_time if overridden on the command line
if (cutCheckCmdLineFlag(argc, (const char **)argv, "kernel_time")) {
cutGetCmdLineArgumenti(argc, (const char **)argv, "kernel_time", &kernel_time);
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
cuda_device = cutilChooseCudaDevice(argc, argv);
cudaDeviceProp deviceProp;
cutilSafeCall( cudaGetDevice(&cuda_device));
cutilSafeCall( cudaGetDeviceProperties(&deviceProp, cuda_device) );
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
cutilSafeCall( cudaMallocHost((void**)&a, nbytes) );
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
cutilSafeCall( cudaMalloc((void**)&d_a, nbytes) );
// allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t*) malloc(nstreams * sizeof(cudaStream_t));
for(int i = 0; i < nstreams; i++)
cutilSafeCall( cudaStreamCreate(&(streams[i])) );
cudaEvent_t *kernelEvent;
kernelEvent = (cudaEvent_t*) malloc(nkernels * sizeof(cudaEvent_t));
for(int i = 0; i < nkernels; i++)
cutilSafeCall( cudaEventCreateWithFlags(&(kernelEvent[i]), cudaEventDisableTiming) );
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
clock_t total_clocks = 0;
clock_t time_clocks = kernel_time * deviceProp.clockRate;
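// clockRate is reported in kHz, so kernel_time (ms) * clockRate (kHz) gives the number of clocks each kernel should spin for.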
//shrLog("Time Clocks time is: %d", time_clocks);
// cudaEventRecord(start_event, 0);
// queue nkernels in separate streams and record when they are done
for( int i=0; i<nkernels; ++i)
{
clock_block<<<1,1,0,streams[i]>>>(&d_a[i], time_clocks );
total_clocks += time_clocks;
cutilSafeCall( cudaEventRecord(kernelEvent[i], streams[i]) );
// make the last stream wait for the kernel event to be recorded
cutilSafeCall( cudaStreamWaitEvent(streams[nstreams-1], kernelEvent[i],0) );
}
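// At this point the last stream waits on every kernel's completion event, so the sum kernel queued below
// cannot start until all clock_block kernels have finished.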
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events have been recorded
sum<<<1,32,0,streams[nstreams-1]>>>(d_a, nkernels);
cutilSafeCall( cudaMemcpyAsync(a, d_a, sizeof(clock_t), cudaMemcpyDeviceToHost, streams[nstreams-1]) );
// at this point the CPU has dispatched all work for the GPU and can continue processing other tasks in parallel
// in this sample we just wait until the GPU is done
// release resources
for(int i = 0; i < nkernels; i++) {
cudaStreamDestroy(streams[i]);
cudaEventDestroy(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
cudaFreeHost(a);
cudaFree(d_a);
cutilDeviceReset();
return 0;
}
|
72ebc4a04a29e44474c2752d480ea92690af4001.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/extension.h>
#include <ATen/ATen.h>
#include "cuda_launch.h"
#include "cuda_kernel.h"
#include <vector>
//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
std::vector<at::Tensor> index_max_kernel(
at::Tensor index_vals, // [batch_size, 32, num_block]
at::Tensor indices, // [batch_size, num_block],
int A_num_block,
int B_num_block
) {
int batch_size = indices.size(0);
int num_block = indices.size(1);
at::Tensor max_vals = at::zeros({batch_size, A_num_block * 32}, index_vals.options());
at::Tensor max_vals_scatter = at::zeros({batch_size, 32, num_block}, index_vals.options());
dim3 threads(256);
dim3 blocks(batch_size);
int shared_mem = A_num_block * 32 * sizeof(float);
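// Dynamic shared memory: A_num_block * 32 floats, i.e. the size of one batch slice of max_vals.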
hipLaunchKernelGGL(( index_max_cuda_kernel), dim3(blocks), dim3(threads), shared_mem, 0,
index_vals.data_ptr<float>(),
indices.data_ptr<int>(),
max_vals.data_ptr<float>(),
max_vals_scatter.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
num_block
);
return {max_vals, max_vals_scatter};
}
at::Tensor mm_to_sparse_kernel(
at::Tensor dense_A, // [batch_size, A_num_block, dim, 32]
at::Tensor dense_B, // [batch_size, B_num_block, dim, 32]
at::Tensor indices // [batch_size, num_block]
) {
int batch_size = dense_A.size(0);
int A_num_block = dense_A.size(1);
int B_num_block = dense_B.size(1);
int dim = dense_A.size(2);
int num_block = indices.size(1);
at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options());
dim3 threads(64, 4);
dim3 blocks(num_block / 4, batch_size);
hipLaunchKernelGGL(( mm_to_sparse_cuda_kernel), dim3(blocks), dim3(threads), 0, 0,
dense_A.data_ptr<float>(),
dense_B.data_ptr<float>(),
indices.data_ptr<int>(),
sparse_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
dim,
num_block
);
return sparse_C;
}
at::Tensor sparse_dense_mm_kernel(
at::Tensor sparse_A, // [batch_size, num_block, 32, 32]
at::Tensor indices, // [batch_size, num_block]
at::Tensor dense_B, // [batch_size, B_num_block, dim, 32]
int A_num_block
) {
int batch_size = sparse_A.size(0);
int num_block = sparse_A.size(1);
int B_num_block = dense_B.size(1);
int dim = dense_B.size(2);
at::Tensor dense_C = at::zeros({batch_size, A_num_block, dim, 32}, dense_B.options());
dim3 threads(128, 2);
dim3 blocks(num_block / 2, batch_size);
hipLaunchKernelGGL(( sparse_dense_mm_cuda_kernel), dim3(blocks), dim3(threads), 0, 0,
sparse_A.data_ptr<float>(),
indices.data_ptr<int>(),
dense_B.data_ptr<float>(),
dense_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
dim,
num_block
);
return dense_C;
}
at::Tensor reduce_sum_kernel(
at::Tensor sparse_A, // [batch_size, num_block, 32, 32]
at::Tensor indices, // [batch_size, num_block]
int A_num_block,
int B_num_block
) {
int batch_size = sparse_A.size(0);
int num_block = sparse_A.size(1);
at::Tensor dense_C = at::zeros({batch_size, A_num_block, 32}, sparse_A.options());
dim3 threads(32, 4);
dim3 blocks(num_block / 4, batch_size);
hipLaunchKernelGGL(( reduce_sum_cuda_kernel), dim3(blocks), dim3(threads), 0, 0,
sparse_A.data_ptr<float>(),
indices.data_ptr<int>(),
dense_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
num_block
);
return dense_C;
}
at::Tensor scatter_kernel(
at::Tensor dense_A, // [batch_size, A_num_block, 32]
at::Tensor indices, // [batch_size, num_block]
int B_num_block
) {
int batch_size = dense_A.size(0);
int A_num_block = dense_A.size(1);
int num_block = indices.size(1);
at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options());
dim3 threads(32, 4);
dim3 blocks(num_block / 4, batch_size);
hipLaunchKernelGGL(( scatter_cuda_kernel), dim3(blocks), dim3(threads), 0, 0,
dense_A.data_ptr<float>(),
indices.data_ptr<int>(),
sparse_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
num_block
);
return sparse_C;
}
| 72ebc4a04a29e44474c2752d480ea92690af4001.cu | #include <torch/extension.h>
#include <ATen/ATen.h>
#include "cuda_launch.h"
#include "cuda_kernel.h"
#include <vector>
//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
std::vector<at::Tensor> index_max_kernel(
at::Tensor index_vals, // [batch_size, 32, num_block]
at::Tensor indices, // [batch_size, num_block],
int A_num_block,
int B_num_block
) {
int batch_size = indices.size(0);
int num_block = indices.size(1);
at::Tensor max_vals = at::zeros({batch_size, A_num_block * 32}, index_vals.options());
at::Tensor max_vals_scatter = at::zeros({batch_size, 32, num_block}, index_vals.options());
dim3 threads(256);
dim3 blocks(batch_size);
int shared_mem = A_num_block * 32 * sizeof(float);
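// Dynamic shared memory: A_num_block * 32 floats, i.e. the size of one batch slice of max_vals.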
index_max_cuda_kernel<<<blocks, threads, shared_mem>>>(
index_vals.data_ptr<float>(),
indices.data_ptr<int>(),
max_vals.data_ptr<float>(),
max_vals_scatter.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
num_block
);
return {max_vals, max_vals_scatter};
}
at::Tensor mm_to_sparse_kernel(
at::Tensor dense_A, // [batch_size, A_num_block, dim, 32]
at::Tensor dense_B, // [batch_size, B_num_block, dim, 32]
at::Tensor indices // [batch_size, num_block]
) {
int batch_size = dense_A.size(0);
int A_num_block = dense_A.size(1);
int B_num_block = dense_B.size(1);
int dim = dense_A.size(2);
int num_block = indices.size(1);
at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options());
dim3 threads(64, 4);
dim3 blocks(num_block / 4, batch_size);
mm_to_sparse_cuda_kernel<<<blocks, threads>>>(
dense_A.data_ptr<float>(),
dense_B.data_ptr<float>(),
indices.data_ptr<int>(),
sparse_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
dim,
num_block
);
return sparse_C;
}
at::Tensor sparse_dense_mm_kernel(
at::Tensor sparse_A, // [batch_size, num_block, 32, 32]
at::Tensor indices, // [batch_size, num_block]
at::Tensor dense_B, // [batch_size, B_num_block, dim, 32]
int A_num_block
) {
int batch_size = sparse_A.size(0);
int num_block = sparse_A.size(1);
int B_num_block = dense_B.size(1);
int dim = dense_B.size(2);
at::Tensor dense_C = at::zeros({batch_size, A_num_block, dim, 32}, dense_B.options());
dim3 threads(128, 2);
dim3 blocks(num_block / 2, batch_size);
sparse_dense_mm_cuda_kernel<<<blocks, threads>>>(
sparse_A.data_ptr<float>(),
indices.data_ptr<int>(),
dense_B.data_ptr<float>(),
dense_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
dim,
num_block
);
return dense_C;
}
at::Tensor reduce_sum_kernel(
at::Tensor sparse_A, // [batch_size, num_block, 32, 32]
at::Tensor indices, // [batch_size, num_block]
int A_num_block,
int B_num_block
) {
int batch_size = sparse_A.size(0);
int num_block = sparse_A.size(1);
at::Tensor dense_C = at::zeros({batch_size, A_num_block, 32}, sparse_A.options());
dim3 threads(32, 4);
dim3 blocks(num_block / 4, batch_size);
reduce_sum_cuda_kernel<<<blocks, threads>>>(
sparse_A.data_ptr<float>(),
indices.data_ptr<int>(),
dense_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
num_block
);
return dense_C;
}
at::Tensor scatter_kernel(
at::Tensor dense_A, // [batch_size, A_num_block, 32]
at::Tensor indices, // [batch_size, num_block]
int B_num_block
) {
int batch_size = dense_A.size(0);
int A_num_block = dense_A.size(1);
int num_block = indices.size(1);
at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options());
dim3 threads(32, 4);
dim3 blocks(num_block / 4, batch_size);
scatter_cuda_kernel<<<blocks, threads>>>(
dense_A.data_ptr<float>(),
indices.data_ptr<int>(),
sparse_C.data_ptr<float>(),
batch_size,
A_num_block,
B_num_block,
num_block
);
return sparse_C;
}
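// Launch-geometry note for the wrappers in this file: the grids are sized with plain
// integer division (num_block / 4 for mm_to_sparse, reduce_sum and scatter,
// num_block / 2 for sparse_dense_mm), so any remainder blocks would simply not be
// processed. The callers are therefore assumed to keep num_block a multiple of 4
// (and 2); if that assumption does not hold upstream, a ceil-division such as
// (num_block + 3) / 4 together with an in-kernel bounds check would be needed.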
|
1c01ea19175e0401c61e63aa5712b668e3f7a55b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zailu_chow_csr_s.cu normal z -> s, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#include "../include/magmasparse_s.h"
#include "../../include/magma.h"
#define PRECISION_s
// every row is handled by one threadblock
__global__ void
magma_ialu_csr_s_kernel( magma_int_t Lnum_rows,
magma_int_t Lnnz,
const float * __restrict__ AL,
float *valL,
magma_index_t *rowptrL,
magma_index_t *rowidxL,
magma_index_t *colidxL,
magma_int_t Unum_rows,
magma_int_t Unnz,
const float * __restrict__ AU,
float *valU,
magma_index_t *rowptrU,
magma_index_t *rowidxU,
magma_index_t *colidxU ){
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
float zero = MAGMA_S_MAKE(0.0, 0.0);
float s, sp;
int il, iu, jl, ju;
if (k < Lnnz)
{
i = (blockIdx.y == 0 ) ? rowidxL[k] : rowidxU[k] ;
j = (blockIdx.y == 0 ) ? colidxL[k] : colidxU[k] ;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = (blockIdx.y == 0 ) ? __ldg( AL+k ) : __ldg( AU+k );
#else
s = (blockIdx.y == 0 ) ? AL[k] : AU[k] ;
#endif
il = rowptrL[i];
iu = rowptrU[j];
while (il < rowptrL[i+1] && iu < rowptrU[j+1])
{
sp = zero;
jl = colidxL[il];
ju = rowidxU[iu];
// avoid branching
sp = ( jl == ju ) ? valL[il] * valU[iu] : sp;
s = ( jl == ju ) ? s-sp : s;
il = ( jl <= ju ) ? il+1 : il;
iu = ( jl >= ju ) ? iu+1 : iu;
/*
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else
{
// we are going to modify this u entry
sp = valL[il] * valU[iu];
s -= sp;
il++;
iu++;
}
*/
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
// modify u entry
if (blockIdx.y == 0)
valL[k] = s / valU[rowptrU[j+1]-1];
else{
valU[k] = s;
}
}
}// kernel
/**
Purpose
-------
This routine computes the ILU approximation of a matrix iteratively.
The idea follows Edmond Chow's presentation at SIAM 2014.
The input format of the matrix is Magma_CSRCOO for the upper and lower
triangular parts. Note, however, that we flip col and rowidx for the
U-part.
Every component of L and U is handled by one thread.
Arguments
---------
@param
A_L magma_s_sparse_matrix
input matrix L
@param
A_U magma_s_sparse_matrix
input matrix U
@param
L magma_s_sparse_matrix
input/output matrix L containing the ILU approximation
@param
U magma_s_sparse_matrix
input/output matrix U containing the ILU approximation
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ialu_csr_s( magma_s_sparse_matrix A_L,
magma_s_sparse_matrix A_U,
magma_s_sparse_matrix L,
magma_s_sparse_matrix U ){
int blocksize1 = 256;
int blocksize2 = 1;
int dimgrid1 = ( A_L.nnz + blocksize1 -1 ) / blocksize1;
int dimgrid2 = 2;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_ialu_csr_s_kernel), dim3(grid), dim3(block), 0, magma_stream ,
A_L.num_rows, A_L.nnz, A_L.val, L.val, L.row, L.rowidx, L.col,
A_U.num_rows, A_U.nnz, A_U.val, U.val, U.row, U.col, U.rowidx );
return MAGMA_SUCCESS;
}
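/*
What one sweep computes (read off the kernel above, one thread per stored
entry, with the sparsity patterns held fixed): the usual Chow-style
fixed-point relations
L-part (blockIdx.y == 0): l_ij = ( a_ij - sum_{k<j} l_ik * u_kj ) / u_jj
U-part (blockIdx.y == 1): u_ij = a_ij - sum_{k<i} l_ik * u_kj
realized by merging the sparse row i of L with the sparse column j of U
(flipping col and rowidx for U, as noted above, is what makes that column
walk contiguous). Each call to magma_ialu_csr_s performs one such sweep;
the factors are expected to converge over repeated calls rather than in a
single pass.
*/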
| 1c01ea19175e0401c61e63aa5712b668e3f7a55b.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zailu_chow_csr_s.cu normal z -> s, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#include "../include/magmasparse_s.h"
#include "../../include/magma.h"
#define PRECISION_s
// every row is handled by one threadblock
__global__ void
magma_ialu_csr_s_kernel( magma_int_t Lnum_rows,
magma_int_t Lnnz,
const float * __restrict__ AL,
float *valL,
magma_index_t *rowptrL,
magma_index_t *rowidxL,
magma_index_t *colidxL,
magma_int_t Unum_rows,
magma_int_t Unnz,
const float * __restrict__ AU,
float *valU,
magma_index_t *rowptrU,
magma_index_t *rowidxU,
magma_index_t *colidxU ){
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
float zero = MAGMA_S_MAKE(0.0, 0.0);
float s, sp;
int il, iu, jl, ju;
if (k < Lnnz)
{
i = (blockIdx.y == 0 ) ? rowidxL[k] : rowidxU[k] ;
j = (blockIdx.y == 0 ) ? colidxL[k] : colidxU[k] ;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = (blockIdx.y == 0 ) ? __ldg( AL+k ) : __ldg( AU+k );
#else
s = (blockIdx.y == 0 ) ? AL[k] : AU[k] ;
#endif
il = rowptrL[i];
iu = rowptrU[j];
while (il < rowptrL[i+1] && iu < rowptrU[j+1])
{
sp = zero;
jl = colidxL[il];
ju = rowidxU[iu];
// avoid branching
sp = ( jl == ju ) ? valL[il] * valU[iu] : sp;
s = ( jl == ju ) ? s-sp : s;
il = ( jl <= ju ) ? il+1 : il;
iu = ( jl >= ju ) ? iu+1 : iu;
/*
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else
{
// we are going to modify this u entry
sp = valL[il] * valU[iu];
s -= sp;
il++;
iu++;
}
*/
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
// modify u entry
if (blockIdx.y == 0)
valL[k] = s / valU[rowptrU[j+1]-1];
else{
valU[k] = s;
}
}
}// kernel
/**
Purpose
-------
This routine computes the ILU approximation of a matrix iteratively.
The idea follows Edmond Chow's presentation at SIAM 2014.
The input format of the matrix is Magma_CSRCOO for the upper and lower
triangular parts. Note, however, that we flip col and rowidx for the
U-part.
Every component of L and U is handled by one thread.
Arguments
---------
@param
A_L magma_s_sparse_matrix
input matrix L
@param
A_U magma_s_sparse_matrix
input matrix U
@param
L magma_s_sparse_matrix
input/output matrix L containing the ILU approximation
@param
U magma_s_sparse_matrix
input/output matrix U containing the ILU approximation
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ialu_csr_s( magma_s_sparse_matrix A_L,
magma_s_sparse_matrix A_U,
magma_s_sparse_matrix L,
magma_s_sparse_matrix U ){
int blocksize1 = 256;
int blocksize2 = 1;
int dimgrid1 = ( A_L.nnz + blocksize1 -1 ) / blocksize1;
int dimgrid2 = 2;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_ialu_csr_s_kernel<<< grid, block, 0, magma_stream >>>
( A_L.num_rows, A_L.nnz, A_L.val, L.val, L.row, L.rowidx, L.col,
A_U.num_rows, A_U.nnz, A_U.val, U.val, U.row, U.col, U.rowidx );
return MAGMA_SUCCESS;
}
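/*
Launch shape: the grid is ( ceil(A_L.nnz / 256), 2 ); the blockIdx.y == 0
slice updates entries of L and the blockIdx.y == 1 slice updates entries of
U, so both factors are swept in a single kernel launch. The in-kernel bound
check uses Lnnz for both slices, so the routine implicitly assumes L and U
carry the same number of stored entries (as in an ILU(0) pattern of a
structurally symmetric matrix); patterns where the two counts differ would
need a separate bound for the U slice.
*/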
|
ad0a0bae634cc5aa2d697a353231f0e2c5e0bd3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd.cu, normal z -> d, Tue Aug 30 09:38:27 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlaset.
*/
__global__
void dgeadd_full(
int m, int n,
double alpha,
const double *dA, int ldda,
double *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
DGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha DOUBLE PRECISION
The scalar alpha.
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_dgeadd_q(
magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( dgeadd_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dA, ldda, dB, lddb );
}
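/*
Minimal host-side usage sketch (illustrative only; it assumes the standard
MAGMA 2.x helpers magma_init, magma_queue_create, magma_dmalloc and that the
device matrices have already been filled, e.g. with magma_dsetmatrix):
magma_init();
magma_queue_t queue;
magma_queue_create( 0, &queue ); // queue on device 0
magma_int_t m = 1000, n = 500, ldda = magma_roundup( m, 32 );
double *dA, *dB;
magma_dmalloc( &dA, ldda*n );
magma_dmalloc( &dB, ldda*n );
// ... copy data into dA and dB ...
magmablas_dgeadd_q( m, n, 2.0, dA, ldda, dB, ldda, queue ); // dB := 2*dA + dB
magma_queue_sync( queue );
magma_free( dA ); magma_free( dB );
magma_queue_destroy( queue );
magma_finalize();
*/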
| ad0a0bae634cc5aa2d697a353231f0e2c5e0bd3a.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd.cu, normal z -> d, Tue Aug 30 09:38:27 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlaset.
*/
__global__
void dgeadd_full(
int m, int n,
double alpha,
const double *dA, int ldda,
double *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
DGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha DOUBLE PRECISION
The scalar alpha.
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB DOUBLE PRECISION array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_dgeadd_q(
magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
dgeadd_full<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, dA, ldda, dB, lddb );
}
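/*
Worked example of the launch shape: for m = 1000 and n = 500 the wrapper
builds grid( ceil(1000/64), ceil(500/32) ) = grid(16, 16) with BLK_X = 64
threads per block; each thread owns one row of its 64 x 32 tile and loops
over the tile's BLK_Y = 32 columns, taking the bounds-checked loop only in
the last, partial block-column.
*/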
|
a9d503f6772eb4dbb71cd61cb82b5e4f2ae0ac4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
/* The version for tesla can be found in ssymv_tesla.cu */
#define symv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (symv_bs)
*/
__global__ void
ssymv_kernel_fermi_L_special(
int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx,
float beta,
float * __restrict__ y, int incy,
float * __restrict__ WC)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2 [thread_x];
float tr[4];
float b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += ty_ * lda + tx_;
if ( ty == 0 ) {
buff[tx] = x[0];
} // obtain the vector x and store it in buff;
tx = tx_; ty = ty_;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ )
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * i + tx_];
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
float res2;
res2 = MAGMA_S_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A -= half_thread_x*lda;
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda * blkc * thread_x;
x = x - blkc * thread_x * incx;
A += 4 * ty * lda;
A += tx;
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
if ( blkc * thread_x >= thread_x ) {
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
for(int i=thread_x; i < (blkc * thread_x); i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
ssymv_kernel_fermi_L_generic(
int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx,
float beta,
float * __restrict__ y, int incy,
float * __restrict__ WC,
int m_mod_thread_x)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2[thread_x];
float tr[4];
float b[8];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_S_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
// Somehow merging these two if-else branches creates a problem
// It could be a potential bug -- from synchronization, from CUDA, or from the compiler
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ )
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
float res2;
res2 = MAGMA_S_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
res_ = MAGMA_S_ZERO;
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_S_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda*break_d;
x = x - break_d*incx;
A += 4 * ty * lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
if ( break_d > 0 )
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
for( int i=thread_x; i < break_d; i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
ssymv_kernel_fermi_L_update(
int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx,
float beta,
float * __restrict__ y, int incy,
float * __restrict__ WC )
{
#if (__CUDA_ARCH__ >= 200)
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
float Ca;
Ca = MAGMA_S_ZERO;
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if ( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C"
void magmablas_ssymv_fermi_L(
magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy,
float *dwork)
{
magma_int_t blocks = (n - 1)/symv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(symv_bs, 1, 1);
/*
* If the matrix size is a multiple of symv_bs, we use a specific code;
* otherwise, we call the generic case.
*/
if ( n % symv_bs == 0 ) {
hipLaunchKernelGGL(( ssymv_kernel_fermi_L_special), dim3(grid), dim3(threads), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
else{
magma_int_t m_mod_thread_x = (n % symv_bs) - 1;
hipLaunchKernelGGL(( ssymv_kernel_fermi_L_generic), dim3(grid), dim3(threads), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x);
}
hipLaunchKernelGGL(( ssymv_kernel_fermi_L_update), dim3(grid), dim3(threads_u), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
/*************************************************************************
Purpose
=======
magmablas_ssymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA REAL.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A REAL array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda be a multiple of 16. Otherwise
performance would deteriorate, as the memory accesses
would not be fully coalesced.
X REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_ssymv(
char uplo, magma_int_t n,
float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2.
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//hipblasSsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_ssymv_tesla( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
//hipblasSsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
hipblasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
float *dwork;
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwork = lda * (blocks + 1);
// TODO deal with error
magma_smalloc( &dwork, lwork );
magmablas_ssymv_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
magma_free( dwork );
}
return MAGMA_SUCCESS;
}
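/*
Minimal call sketch for the lower-triangular path (illustrative only; dA, dx
and dy are assumed to be device arrays that were allocated and filled
beforehand, e.g. with magma_smalloc, magma_ssetmatrix and magma_ssetvector,
and lda >= n, ideally a multiple of 16 as recommended above):
float alpha = 1.0f, beta = 0.0f;
magma_int_t info = magmablas_ssymv( 'L', n, alpha, dA, lda, dx, 1, beta, dy, 1 );
// dy now holds alpha*A*dx + beta*dy using only the lower triangle of dA;
// a negative info flags the offending argument, mirroring the checks above.
*/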
/*************************************************************************
Purpose
=======
magmablas_ssymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
The interface of magmablas_ssymv_work differs from magmablas_ssymv only in
the last argument, dwork.
MAGMA implements ssymv in two steps:
1) perform the multiplication in each thread block and put the intermediate
values in a piece of device memory that we call the working space; dwork is that working space.
2) sum the intermediate values and store the final result in y.
The size of dwork is
lda * ceil(n/thread_x)
where thread_x = 64
magmablas_ssymv_work requires users to provide the working space, while magmablas_ssymv is
a wrapper routine that allocates the working space inside the routine
and provides the same interface as cublas.
If users need to call ssymv frequently, we suggest using magmablas_ssymv_work instead of magmablas_ssymv,
as the overhead of allocating and freeing device memory in magmablas_ssymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000.
*/
extern "C"
magma_int_t
magmablas_ssymv_work(
char uplo, magma_int_t n,
float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy,
float *dwork, magma_int_t lwork)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [z]symv is not implemented in cublas v1, but is in cublas v2.
#if defined(PRECISION_z)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//hipblasSsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_ssymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
return MAGMA_ERR_NOT_SUPPORTED;
#else
hipblasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
/* TODO check lwork size! */
magmablas_ssymv_fermi_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
return MAGMA_SUCCESS;
}
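/*
Worked size example for the workspace (mirroring the allocation done in
magmablas_ssymv above): with thread_x = 64, n = 10000 and lda = 10016,
blocks = (10000 - 1)/64 + 1 = 157 and lwork = lda * (blocks + 1)
= 10016 * 158 = 1582528 floats, i.e. roughly 6 MB of device memory that a
caller of magmablas_ssymv_work keeps alive across repeated calls instead of
reallocating it every time.
*/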
| a9d503f6772eb4dbb71cd61cb82b5e4f2ae0ac4c.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define PRECISION_s
/* The version for tesla can be found in ssymv_tesla.cu */
#define symv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (symv_bs)
*/
__global__ void
ssymv_kernel_fermi_L_special(
int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx,
float beta,
float * __restrict__ y, int incy,
float * __restrict__ WC)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2 [thread_x];
float tr[4];
float b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += ty_ * lda + tx_;
if ( ty == 0 ) {
buff[tx] = x[0];
} // obtain the vector x and store it in buff;
tx = tx_; ty = ty_;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ )
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * i + tx_];
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
float res2;
res2 = MAGMA_S_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
A -= half_thread_x*lda;
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda * blkc * thread_x;
x = x - blkc * thread_x * incx;
A += 4 * ty * lda;
A += tx;
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
if ( blkc * thread_x >= thread_x ) {
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
for(int i=thread_x; i < (blkc * thread_x); i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
ssymv_kernel_fermi_L_generic(
int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx,
float beta,
float * __restrict__ y, int incy,
float * __restrict__ WC,
int m_mod_thread_x)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
float res = MAGMA_S_ZERO;
float res_ = MAGMA_S_ZERO;
float res1 = MAGMA_S_ZERO;
__shared__ float la [quarter_thread_x][thread_x+2];
__shared__ float buff [thread_x];
__shared__ float buff2[thread_x];
float tr[4];
float b[8];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_S_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
// Somehow merging these two if-else branches creates a problem
// It could be a potential bug -- from synchronization, from CUDA, or from the compiler
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ )
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
float res2;
res2 = MAGMA_S_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
res = MAGMA_S_ZERO;
res_ = MAGMA_S_ZERO;
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_S_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_S_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_S_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_S_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda*break_d;
x = x - break_d*incx;
A += 4 * ty * lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
if ( break_d > 0 )
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
for( int i=thread_x; i < break_d; i += thread_x ) {
res_ = MAGMA_S_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_S_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
ssymv_kernel_fermi_L_update(
int n, float alpha,
const float * __restrict__ A, int lda,
const float * __restrict__ x, int incx,
float beta,
float * __restrict__ y, int incy,
float * __restrict__ WC )
{
#if (__CUDA_ARCH__ >= 200)
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
float Ca;
Ca = MAGMA_S_ZERO;
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if ( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C"
void magmablas_ssymv_fermi_L(
magma_int_t n, float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy,
float *dwork)
{
magma_int_t blocks = (n - 1)/symv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(symv_bs, 1, 1);
/*
* If the matrix size is a multiple of symv_bs, we use a specific code;
* otherwise, we call the generic case.
*/
if ( n % symv_bs == 0 ) {
ssymv_kernel_fermi_L_special<<< grid, threads, 0, magma_stream >>>
(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
else{
magma_int_t m_mod_thread_x = (n % symv_bs) - 1;
ssymv_kernel_fermi_L_generic<<< grid, threads, 0, magma_stream >>>
(n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x);
}
ssymv_kernel_fermi_L_update<<< grid, threads_u, 0, magma_stream >>>
(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
/*************************************************************************
Purpose
=======
magmablas_ssymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA REAL.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A REAL array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda be a multiple of 16. Otherwise
performance would deteriorate, as the memory accesses
would not be fully coalesced.
X REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y REAL array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_ssymv(
char uplo, magma_int_t n,
float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2.
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//cublasSsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_ssymv_tesla( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
//cublasSsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
cublasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
float *dwork;
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwork = lda * (blocks + 1);
// TODO deal with error
magma_smalloc( &dwork, lwork );
magmablas_ssymv_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
magma_free( dwork );
}
return MAGMA_SUCCESS;
}
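/*
Dispatch summary for this wrapper: arch < 200 routes to the Tesla
implementation, uplo = 'U' falls back to cublasSsymv (only the
lower-triangular case has a MAGMA Fermi kernel), and uplo = 'L' allocates a
temporary workspace of lda * ((n-1)/thread_x + 2) floats and calls
magmablas_ssymv_work, which in turn drives the Fermi kernels defined above.
*/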
/*************************************************************************
Purpose
=======
magmablas_ssymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
    The interface of magmablas_ssymv_work differs from magmablas_ssymv in the
    additional trailing workspace arguments dwork and lwork.
    MAGMA implements ssymv in two steps:
    1) perform the multiplication in each thread block and store the intermediate
       values in a piece of device memory called the working space; dwork is that working space.
2) sum the intermediate values and store the final result in y.
The size of dwork is
lda * ceil(n/thread_x)
where thread_x = 64
    magmablas_ssymv_work requires users to provide a working space, while
    magmablas_ssymv is a wrapper routine that allocates the working space inside
    the routine and provides the same interface as cublas.
    If users need to call ssymv frequently, we suggest using magmablas_ssymv_work
    instead of magmablas_ssymv, as the overhead of allocating and freeing device
    memory in magmablas_ssymv hurts performance.
    Our tests show that this penalty is about 10 Gflop/s when the matrix size is
    around 10000.
*/
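/*
    A minimal workspace-sizing sketch (n, lda, incx, incy and the device arrays
    dA, dx, dy are assumed to be set up by the caller; the lwork formula simply
    mirrors the allocation done inside the magmablas_ssymv wrapper above, with
    thread_x = 64):

        magma_int_t blocks = (n - 1)/64 + 1;
        magma_int_t lwork  = lda * (blocks + 1);
        float *dwork;
        magma_smalloc( &dwork, lwork );
        // ... repeated calls to
        // magmablas_ssymv_work( uplo, n, alpha, dA, lda, dx, incx,
        //                       beta, dy, incy, dwork, lwork );
        magma_free( dwork );
*/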
extern "C"
magma_int_t
magmablas_ssymv_work(
char uplo, magma_int_t n,
float alpha,
const float *A, magma_int_t lda,
const float *x, magma_int_t incx,
float beta,
float *y, magma_int_t incy,
float *dwork, magma_int_t lwork)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [z]symv is not implemented in cublas v1, but is in cublas v2.
#if defined(PRECISION_z)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//cublasSsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_ssymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
return MAGMA_ERR_NOT_SUPPORTED;
#else
cublasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
/* TODO check lwork size! */
magmablas_ssymv_fermi_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
return MAGMA_SUCCESS;
}
|
96ad0c33183e3109a2bc21e6746dfadfc9432ea4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
__global__ void compute_coupling_ocean_kernel(float * accel_crust_mantle, const float * rmassx_crust_mantle, const float * rmassy_crust_mantle, const float * rmassz_crust_mantle, const float * rmass_ocean_load, const int npoin_ocean_load, const int * ibool_ocean_load, const float * normal_ocean_load){
int ipoin;
int iglob;
float nx;
float ny;
float nz;
float rmass;
float force_normal_comp;
float additional_term_x;
float additional_term_y;
float additional_term_z;
ipoin = threadIdx.x + (blockIdx.x) * (blockDim.x) + ((gridDim.x) * (blockDim.x)) * (threadIdx.y + (blockIdx.y) * (blockDim.y));
if (ipoin < npoin_ocean_load) {
iglob = ibool_ocean_load[ipoin] - (1);
nx = normal_ocean_load[INDEX2(NDIM, 0, ipoin)];
ny = normal_ocean_load[INDEX2(NDIM, 1, ipoin)];
nz = normal_ocean_load[INDEX2(NDIM, 2, ipoin)];
force_normal_comp = ((accel_crust_mantle[0 + (3) * (iglob)]) * (nx)) / (rmassx_crust_mantle[iglob]) + ((accel_crust_mantle[1 + (3) * (iglob)]) * (ny)) / (rmassy_crust_mantle[iglob]) + ((accel_crust_mantle[2 + (3) * (iglob)]) * (nz)) / (rmassz_crust_mantle[iglob]);
rmass = rmass_ocean_load[ipoin];
additional_term_x = (rmass - (rmassx_crust_mantle[iglob])) * (force_normal_comp);
additional_term_y = (rmass - (rmassy_crust_mantle[iglob])) * (force_normal_comp);
additional_term_z = (rmass - (rmassz_crust_mantle[iglob])) * (force_normal_comp);
accel_crust_mantle[0 + (3) * (iglob)] = accel_crust_mantle[0 + (3) * (iglob)] + (additional_term_x) * (nx);
accel_crust_mantle[1 + (3) * (iglob)] = accel_crust_mantle[1 + (3) * (iglob)] + (additional_term_y) * (ny);
accel_crust_mantle[2 + (3) * (iglob)] = accel_crust_mantle[2 + (3) * (iglob)] + (additional_term_z) * (nz);
}
}
| 96ad0c33183e3109a2bc21e6746dfadfc9432ea4.cu | //note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
__global__ void compute_coupling_ocean_kernel(float * accel_crust_mantle, const float * rmassx_crust_mantle, const float * rmassy_crust_mantle, const float * rmassz_crust_mantle, const float * rmass_ocean_load, const int npoin_ocean_load, const int * ibool_ocean_load, const float * normal_ocean_load){
int ipoin;
int iglob;
float nx;
float ny;
float nz;
float rmass;
float force_normal_comp;
float additional_term_x;
float additional_term_y;
float additional_term_z;
ipoin = threadIdx.x + (blockIdx.x) * (blockDim.x) + ((gridDim.x) * (blockDim.x)) * (threadIdx.y + (blockIdx.y) * (blockDim.y));
if (ipoin < npoin_ocean_load) {
iglob = ibool_ocean_load[ipoin] - (1);
nx = normal_ocean_load[INDEX2(NDIM, 0, ipoin)];
ny = normal_ocean_load[INDEX2(NDIM, 1, ipoin)];
nz = normal_ocean_load[INDEX2(NDIM, 2, ipoin)];
force_normal_comp = ((accel_crust_mantle[0 + (3) * (iglob)]) * (nx)) / (rmassx_crust_mantle[iglob]) + ((accel_crust_mantle[1 + (3) * (iglob)]) * (ny)) / (rmassy_crust_mantle[iglob]) + ((accel_crust_mantle[2 + (3) * (iglob)]) * (nz)) / (rmassz_crust_mantle[iglob]);
rmass = rmass_ocean_load[ipoin];
additional_term_x = (rmass - (rmassx_crust_mantle[iglob])) * (force_normal_comp);
additional_term_y = (rmass - (rmassy_crust_mantle[iglob])) * (force_normal_comp);
additional_term_z = (rmass - (rmassz_crust_mantle[iglob])) * (force_normal_comp);
accel_crust_mantle[0 + (3) * (iglob)] = accel_crust_mantle[0 + (3) * (iglob)] + (additional_term_x) * (nx);
accel_crust_mantle[1 + (3) * (iglob)] = accel_crust_mantle[1 + (3) * (iglob)] + (additional_term_y) * (ny);
accel_crust_mantle[2 + (3) * (iglob)] = accel_crust_mantle[2 + (3) * (iglob)] + (additional_term_z) * (nz);
}
}
|
8f8b03c98f2946ee25f0c6d44f54f3304d351de2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void profileSubphaseComputeCoarseA_kernel() {} | 8f8b03c98f2946ee25f0c6d44f54f3304d351de2.cu | #include "includes.h"
__global__ void profileSubphaseComputeCoarseA_kernel() {} |
8a0727e4f22abac4369138583f24629f2e364cb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_DIM 16
#define BLOCKS_PER_DIM 16
#define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM
#include "kmeans_hip_kernel.hip"
//#define BLOCK_DELTA_REDUCE
//#define BLOCK_CENTER_REDUCE
#define CPU_DELTA_REDUCE
#define CPU_CENTER_REDUCE
extern "C"
int setup(int argc, char** argv); /* function prototype */
// GLOBAL!!!!!
unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */
unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */
unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */
unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */
/* _d denotes it resides on the device */
int *membership_new; /* newly assignment membership */
float *feature_d; /* inverted data array */
float *feature_flipped_d; /* original (not inverted) data array */
int *membership_d; /* membership on the device */
float *block_new_centers; /* sum of points in a cluster (per block) */
float *clusters_d; /* cluster centers on the device */
float *block_clusters_d; /* per block calculation of cluster centers */
int *block_deltas_d; /* per block calculation of deltas */
/* -------------- allocateMemory() ------------------- */
/* allocate device memory, calculate number of blocks and threads, and invert the data array */
extern "C"
void allocateMemory(int npoints, int nfeatures, int nclusters, float **features)
{
num_blocks = npoints / num_threads;
if (npoints % num_threads > 0) /* defeat truncation */
num_blocks++;
num_blocks_perdim = sqrt((double) num_blocks);
while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once)
num_blocks_perdim++;
num_blocks = num_blocks_perdim*num_blocks_perdim;
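    /* worked example: with npoints = 494020 and num_threads = 256,
       num_blocks starts at 1930, num_blocks_perdim = ceil(sqrt(1930)) = 44,
       and num_blocks is rounded up to 44*44 = 1936 so that the square
       launch grid covers every point */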
    /* allocate memory for membership_new[] and initialize to -1 (host) */
membership_new = (int*) malloc(npoints * sizeof(int));
for(int i=0;i<npoints;i++) {
membership_new[i] = -1;
}
/* allocate memory for block_new_centers[] (host) */
block_new_centers = (float *) malloc(nclusters*nfeatures*sizeof(float));
/* allocate memory for feature_flipped_d[][], feature_d[][] (device) */
hipMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(float));
hipMemcpy(feature_flipped_d, features[0], npoints*nfeatures*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**) &feature_d, npoints*nfeatures*sizeof(float));
/* invert the data array (kernel execution) */
hipLaunchKernelGGL(( invert_mapping), dim3(num_blocks),dim3(num_threads), 0, 0, feature_flipped_d,feature_d,npoints,nfeatures);
/* allocate memory for membership_d[] and clusters_d[][] (device) */
hipMalloc((void**) &membership_d, npoints*sizeof(int));
hipMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(float));
#ifdef BLOCK_DELTA_REDUCE
// allocate array to hold the per block deltas on the gpu side
hipMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int));
//hipMemcpy(block_delta_d, &delta_h, sizeof(int), hipMemcpyHostToDevice);
#endif
#ifdef BLOCK_CENTER_REDUCE
// allocate memory and copy to card cluster array in which to accumulate center points for the next iteration
hipMalloc((void**) &block_clusters_d,
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(float));
//hipMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), hipMemcpyHostToDevice);
#endif
}
/* -------------- allocateMemory() end ------------------- */
/* -------------- deallocateMemory() ------------------- */
/* free host and device memory */
extern "C"
void deallocateMemory()
{
free(membership_new);
free(block_new_centers);
hipFree(feature_d);
hipFree(feature_flipped_d);
hipFree(membership_d);
hipFree(clusters_d);
#ifdef BLOCK_CENTER_REDUCE
hipFree(block_clusters_d);
#endif
#ifdef BLOCK_DELTA_REDUCE
hipFree(block_deltas_d);
#endif
}
/* -------------- deallocateMemory() end ------------------- */
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
////////////////////////////////////////////////////////////////////////////////
// Program main //
int
main( int argc, char** argv)
{
// make sure we're running on the big card
hipSetDevice(1);
// as done in the CUDA start/help document provided
setup(argc, argv);
}
// //
////////////////////////////////////////////////////////////////////////////////
/* ------------------- kmeansCuda() ------------------------ */
extern "C"
int // delta -- had problems when return value was of float type
kmeansCuda(float **feature, /* in: [npoints][nfeatures] */
int nfeatures, /* number of attributes for each point */
int npoints, /* number of data points */
int nclusters, /* number of clusters */
int *membership, /* which cluster the point belongs to */
float **clusters, /* coordinates of cluster centers */
int *new_centers_len, /* number of elements in each cluster */
float **new_centers, /* sum of elements in each cluster */
double *kernel_time
)
{
int delta = 0; /* if point has moved */
int i,j; /* counters */
hipSetDevice(1);
/* copy membership (host to device) */
hipMemcpy(membership_d, membership_new, npoints*sizeof(int), hipMemcpyHostToDevice);
/* copy clusters (host to device) */
hipMemcpy(clusters_d, clusters[0], nclusters*nfeatures*sizeof(float), hipMemcpyHostToDevice);
/* set up texture */
hipChannelFormatDesc chDesc0 = hipCreateChannelDesc<float>();
t_features.filterMode = hipFilterModePoint;
t_features.normalized = false;
t_features.channelDesc = chDesc0;
if(hipBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(float)) != hipSuccess)
printf("Couldn't bind features array to texture!\n");
hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<float>();
t_features_flipped.filterMode = hipFilterModePoint;
t_features_flipped.normalized = false;
t_features_flipped.channelDesc = chDesc1;
if(hipBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(float)) != hipSuccess)
printf("Couldn't bind features_flipped array to texture!\n");
hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<float>();
t_clusters.filterMode = hipFilterModePoint;
t_clusters.normalized = false;
t_clusters.channelDesc = chDesc2;
if(hipBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(float)) != hipSuccess)
printf("Couldn't bind clusters array to texture!\n");
/* copy clusters to constant memory */
hipMemcpyToSymbol("c_clusters",clusters[0],nclusters*nfeatures*sizeof(float),0,hipMemcpyHostToDevice);
/* setup execution parameters.
changed to 2d (source code on NVIDIA CUDA Programming Guide) */
dim3 grid( num_blocks_perdim, num_blocks_perdim );
dim3 threads( num_threads_perdim*num_threads_perdim );
double time = mysecond();
/* execute the kernel */
hipLaunchKernelGGL(( kmeansPoint), dim3(grid), dim3(threads) , 0, 0, feature_d,
nfeatures,
npoints,
nclusters,
membership_d,
clusters_d,
block_clusters_d,
block_deltas_d);
hipDeviceSynchronize();
*kernel_time = mysecond()-time;
/* copy back membership (device to host) */
hipMemcpy(membership_new, membership_d, npoints*sizeof(int), hipMemcpyDeviceToHost);
#ifdef BLOCK_CENTER_REDUCE
/*** Copy back arrays of per block sums ***/
float * block_clusters_h = (float *) malloc(
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(float));
hipMemcpy(block_clusters_h, block_clusters_d,
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(float),
hipMemcpyDeviceToHost);
#endif
#ifdef BLOCK_DELTA_REDUCE
int * block_deltas_h = (int *) malloc(
num_blocks_perdim * num_blocks_perdim * sizeof(int));
hipMemcpy(block_deltas_h, block_deltas_d,
num_blocks_perdim * num_blocks_perdim * sizeof(int),
hipMemcpyDeviceToHost);
#endif
/* for each point, sum data points in each cluster
and see if membership has changed:
if so, increase delta and change old membership, and update new_centers;
otherwise, update new_centers */
delta = 0;
for (i = 0; i < npoints; i++)
{
int cluster_id = membership_new[i];
new_centers_len[cluster_id]++;
if (membership_new[i] != membership[i])
{
#ifdef CPU_DELTA_REDUCE
delta++;
#endif
membership[i] = membership_new[i];
}
#ifdef CPU_CENTER_REDUCE
for (j = 0; j < nfeatures; j++)
{
new_centers[cluster_id][j] += feature[i][j];
}
#endif
}
#ifdef BLOCK_DELTA_REDUCE
/*** calculate global sums from per block sums for delta and the new centers ***/
//debug
//printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim * num_blocks_perdim);
for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
//printf("block %d delta is %d \n",i,block_deltas_h[i]);
delta += block_deltas_h[i];
}
#endif
#ifdef BLOCK_CENTER_REDUCE
for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++) {
block_new_centers[j*nfeatures + k] = 0.f;
}
}
for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++) {
block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k];
}
}
}
#ifdef CPU_CENTER_REDUCE
//debug
/*for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++) {
if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] || new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) {
printf("\t \t for %d:%d, normal value is %e and gpu reduced value id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]);
}
}
}*/
#endif
#ifdef BLOCK_CENTER_REDUCE
for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++)
new_centers[j][k]= block_new_centers[j*nfeatures + k];
}
#endif
#endif
return delta;
}
/* ------------------- kmeansCuda() end ------------------------ */
| 8a0727e4f22abac4369138583f24629f2e364cb5.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <omp.h>
#include <cuda.h>
#define THREADS_PER_DIM 16
#define BLOCKS_PER_DIM 16
#define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM
#include "kmeans_cuda_kernel.cu"
//#define BLOCK_DELTA_REDUCE
//#define BLOCK_CENTER_REDUCE
#define CPU_DELTA_REDUCE
#define CPU_CENTER_REDUCE
extern "C"
int setup(int argc, char** argv); /* function prototype */
// GLOBAL!!!!!
unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */
unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */
unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */
unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */
/* _d denotes it resides on the device */
int *membership_new; /* newly assignment membership */
float *feature_d; /* inverted data array */
float *feature_flipped_d; /* original (not inverted) data array */
int *membership_d; /* membership on the device */
float *block_new_centers; /* sum of points in a cluster (per block) */
float *clusters_d; /* cluster centers on the device */
float *block_clusters_d; /* per block calculation of cluster centers */
int *block_deltas_d; /* per block calculation of deltas */
/* -------------- allocateMemory() ------------------- */
/* allocate device memory, calculate number of blocks and threads, and invert the data array */
extern "C"
void allocateMemory(int npoints, int nfeatures, int nclusters, float **features)
{
num_blocks = npoints / num_threads;
if (npoints % num_threads > 0) /* defeat truncation */
num_blocks++;
num_blocks_perdim = sqrt((double) num_blocks);
while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once)
num_blocks_perdim++;
num_blocks = num_blocks_perdim*num_blocks_perdim;
    /* allocate memory for membership_new[] and initialize to -1 (host) */
membership_new = (int*) malloc(npoints * sizeof(int));
for(int i=0;i<npoints;i++) {
membership_new[i] = -1;
}
/* allocate memory for block_new_centers[] (host) */
block_new_centers = (float *) malloc(nclusters*nfeatures*sizeof(float));
/* allocate memory for feature_flipped_d[][], feature_d[][] (device) */
cudaMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(float));
cudaMemcpy(feature_flipped_d, features[0], npoints*nfeatures*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**) &feature_d, npoints*nfeatures*sizeof(float));
/* invert the data array (kernel execution) */
invert_mapping<<<num_blocks,num_threads>>>(feature_flipped_d,feature_d,npoints,nfeatures);
/* allocate memory for membership_d[] and clusters_d[][] (device) */
cudaMalloc((void**) &membership_d, npoints*sizeof(int));
cudaMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(float));
#ifdef BLOCK_DELTA_REDUCE
// allocate array to hold the per block deltas on the gpu side
cudaMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int));
//cudaMemcpy(block_delta_d, &delta_h, sizeof(int), cudaMemcpyHostToDevice);
#endif
#ifdef BLOCK_CENTER_REDUCE
// allocate memory and copy to card cluster array in which to accumulate center points for the next iteration
cudaMalloc((void**) &block_clusters_d,
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(float));
//cudaMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), cudaMemcpyHostToDevice);
#endif
}
/* -------------- allocateMemory() end ------------------- */
/* -------------- deallocateMemory() ------------------- */
/* free host and device memory */
extern "C"
void deallocateMemory()
{
free(membership_new);
free(block_new_centers);
cudaFree(feature_d);
cudaFree(feature_flipped_d);
cudaFree(membership_d);
cudaFree(clusters_d);
#ifdef BLOCK_CENTER_REDUCE
cudaFree(block_clusters_d);
#endif
#ifdef BLOCK_DELTA_REDUCE
cudaFree(block_deltas_d);
#endif
}
/* -------------- deallocateMemory() end ------------------- */
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
////////////////////////////////////////////////////////////////////////////////
// Program main //
int
main( int argc, char** argv)
{
// make sure we're running on the big card
cudaSetDevice(1);
// as done in the CUDA start/help document provided
setup(argc, argv);
}
// //
////////////////////////////////////////////////////////////////////////////////
/* ------------------- kmeansCuda() ------------------------ */
extern "C"
int // delta -- had problems when return value was of float type
kmeansCuda(float **feature, /* in: [npoints][nfeatures] */
int nfeatures, /* number of attributes for each point */
int npoints, /* number of data points */
int nclusters, /* number of clusters */
int *membership, /* which cluster the point belongs to */
float **clusters, /* coordinates of cluster centers */
int *new_centers_len, /* number of elements in each cluster */
float **new_centers, /* sum of elements in each cluster */
double *kernel_time
)
{
int delta = 0; /* if point has moved */
int i,j; /* counters */
cudaSetDevice(1);
/* copy membership (host to device) */
cudaMemcpy(membership_d, membership_new, npoints*sizeof(int), cudaMemcpyHostToDevice);
/* copy clusters (host to device) */
cudaMemcpy(clusters_d, clusters[0], nclusters*nfeatures*sizeof(float), cudaMemcpyHostToDevice);
/* set up texture */
cudaChannelFormatDesc chDesc0 = cudaCreateChannelDesc<float>();
t_features.filterMode = cudaFilterModePoint;
t_features.normalized = false;
t_features.channelDesc = chDesc0;
if(cudaBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(float)) != CUDA_SUCCESS)
printf("Couldn't bind features array to texture!\n");
cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<float>();
t_features_flipped.filterMode = cudaFilterModePoint;
t_features_flipped.normalized = false;
t_features_flipped.channelDesc = chDesc1;
if(cudaBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(float)) != CUDA_SUCCESS)
printf("Couldn't bind features_flipped array to texture!\n");
cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<float>();
t_clusters.filterMode = cudaFilterModePoint;
t_clusters.normalized = false;
t_clusters.channelDesc = chDesc2;
if(cudaBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(float)) != CUDA_SUCCESS)
printf("Couldn't bind clusters array to texture!\n");
/* copy clusters to constant memory */
cudaMemcpyToSymbol("c_clusters",clusters[0],nclusters*nfeatures*sizeof(float),0,cudaMemcpyHostToDevice);
/* setup execution parameters.
changed to 2d (source code on NVIDIA CUDA Programming Guide) */
dim3 grid( num_blocks_perdim, num_blocks_perdim );
dim3 threads( num_threads_perdim*num_threads_perdim );
double time = mysecond();
/* execute the kernel */
kmeansPoint<<< grid, threads >>>( feature_d,
nfeatures,
npoints,
nclusters,
membership_d,
clusters_d,
block_clusters_d,
block_deltas_d);
cudaThreadSynchronize();
*kernel_time = mysecond()-time;
/* copy back membership (device to host) */
cudaMemcpy(membership_new, membership_d, npoints*sizeof(int), cudaMemcpyDeviceToHost);
#ifdef BLOCK_CENTER_REDUCE
/*** Copy back arrays of per block sums ***/
float * block_clusters_h = (float *) malloc(
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(float));
cudaMemcpy(block_clusters_h, block_clusters_d,
num_blocks_perdim * num_blocks_perdim *
nclusters * nfeatures * sizeof(float),
cudaMemcpyDeviceToHost);
#endif
#ifdef BLOCK_DELTA_REDUCE
int * block_deltas_h = (int *) malloc(
num_blocks_perdim * num_blocks_perdim * sizeof(int));
cudaMemcpy(block_deltas_h, block_deltas_d,
num_blocks_perdim * num_blocks_perdim * sizeof(int),
cudaMemcpyDeviceToHost);
#endif
/* for each point, sum data points in each cluster
and see if membership has changed:
if so, increase delta and change old membership, and update new_centers;
otherwise, update new_centers */
delta = 0;
for (i = 0; i < npoints; i++)
{
int cluster_id = membership_new[i];
new_centers_len[cluster_id]++;
if (membership_new[i] != membership[i])
{
#ifdef CPU_DELTA_REDUCE
delta++;
#endif
membership[i] = membership_new[i];
}
#ifdef CPU_CENTER_REDUCE
for (j = 0; j < nfeatures; j++)
{
new_centers[cluster_id][j] += feature[i][j];
}
#endif
}
#ifdef BLOCK_DELTA_REDUCE
/*** calculate global sums from per block sums for delta and the new centers ***/
//debug
//printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim * num_blocks_perdim);
for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
//printf("block %d delta is %d \n",i,block_deltas_h[i]);
delta += block_deltas_h[i];
}
#endif
#ifdef BLOCK_CENTER_REDUCE
for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++) {
block_new_centers[j*nfeatures + k] = 0.f;
}
}
for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++) {
block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k];
}
}
}
#ifdef CPU_CENTER_REDUCE
//debug
/*for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++) {
if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] || new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) {
printf("\t \t for %d:%d, normal value is %e and gpu reduced value id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]);
}
}
}*/
#endif
#ifdef BLOCK_CENTER_REDUCE
for(int j = 0; j < nclusters;j++) {
for(int k = 0; k < nfeatures;k++)
new_centers[j][k]= block_new_centers[j*nfeatures + k];
}
#endif
#endif
return delta;
}
/* ------------------- kmeansCuda() end ------------------------ */
|
826f118a08d497689c112d086d4a62f96e2baf97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from ztrtri_diag.cu normal z -> c, Fri Jan 30 19:00:10 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ctrtri_diag.cu to avoid name conflict with src/ctrtri.o
in the library. The actual kernels are in ctrtri_lower.cu and ctrtri_upper.cu
*/
#include "common_magma.h"
#include "ctrtri.h"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ctrsm.
Same as ctrtri_diag, but adds queue argument.
@ingroup magma_cblas3
********************************************************************/
/**
Purpose
-------
ctrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA COMPLEX array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA COMPLEX array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_cblas3
********************************************************************/
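/*
    A minimal allocation sketch (dA, ldda and n are assumed to be set up by the
    caller, and magma_cmalloc is used here only for illustration; d_dinvA is
    sized exactly as documented above, with NB = 128):

        magma_int_t nblocks_NB = (n + 128 - 1)/128;         // ((n+NB-1)/NB)
        magmaFloatComplex *d_dinvA;
        magma_cmalloc( &d_dinvA, 128 * nblocks_NB * 128 );  // NB * ((n+NB-1)/NB) * NB elements
        magmablas_ctrtri_diag( MagmaLower, MagmaNonUnit, n, dA, ldda, d_dinvA );
        // ... use the inverted diagonal blocks, e.g. inside a blocked triangular solve ...
        magma_free( d_dinvA );
*/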
extern "C" void
magmablas_ctrtri_diag_q(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr d_dinvA,
magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = (n + IB - 1)/IB;
hipMemset( d_dinvA, 0, ((n+NB-1)/NB)*NB*NB * sizeof(magmaFloatComplex) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
hipLaunchKernelGGL(( ctrtri_diag_lower_kernel), dim3(nblocks), dim3(IB), 0, queue , diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_cgemm16_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm16_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_cgemm32_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm32_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_cgemm64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_cgemm_above64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part3_lower_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
hipLaunchKernelGGL(( ctrtri_diag_upper_kernel), dim3(nblocks), dim3(IB), 0, queue , diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_cgemm16_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm16_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_cgemm32_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm32_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_cgemm64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_cgemm_above64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_cgemm_above64_part3_upper_kernel), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
/**
@see magmablas_ctrtri_diag_q
@ingroup magma_cblas3
********************************************************************/
extern "C" void
magmablas_ctrtri_diag(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr d_dinvA)
{
magmablas_ctrtri_diag_q( uplo, diag, n, dA, ldda, d_dinvA, magma_stream );
}
| 826f118a08d497689c112d086d4a62f96e2baf97.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from ztrtri_diag.cu normal z -> c, Fri Jan 30 19:00:10 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ctrtri_diag.cu to avoid name conflict with src/ctrtri.o
in the library. The actual kernels are in ctrtri_lower.cu and ctrtri_upper.cu
*/
#include "common_magma.h"
#include "ctrtri.h"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ctrsm.
Same as ctrtri_diag, but adds queue argument.
@ingroup magma_cblas3
********************************************************************/
/**
Purpose
-------
ctrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA COMPLEX array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA COMPLEX array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_cblas3
********************************************************************/
extern "C" void
magmablas_ctrtri_diag_q(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr d_dinvA,
magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = (n + IB - 1)/IB;
cudaMemset( d_dinvA, 0, ((n+NB-1)/NB)*NB*NB * sizeof(magmaFloatComplex) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
ctrtri_diag_lower_kernel<<< nblocks, IB, 0, queue >>>( diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_cgemm16_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm16_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_cgemm32_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm32_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_cgemm64_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm64_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_cgemm_above64_part1_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm_above64_part2_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm_above64_part3_lower_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
ctrtri_diag_upper_kernel<<< nblocks, IB, 0, queue >>>( diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_cgemm16_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm16_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_cgemm32_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm32_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_cgemm64_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm64_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_cgemm_above64_part1_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm_above64_part2_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_cgemm_above64_part3_upper_kernel<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
/**
@see magmablas_ctrtri_diag_q
@ingroup magma_cblas3
********************************************************************/
extern "C" void
magmablas_ctrtri_diag(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr d_dinvA)
{
magmablas_ctrtri_diag_q( uplo, diag, n, dA, ldda, d_dinvA, magma_stream );
}
|
6ee247dfd80d7ea52a491cc6f0faf0d7f9b1cf6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cassert>
#include<cstdint>
#include<cmath>
#include<random>
#include<vector>
// #define VERIFY
#ifdef VERIFY
#define COUNT(x) atomicAdd(&counters[x],1);
#else
__device__
void dummy(int){}
#define COUNT(x) dummy(x);
#endif
template<int STRIDE>
__global__
void nn(uint32_t * __restrict__ counters,
float const * __restrict__ z, float const * __restrict__ w, uint32_t * __restrict__ nns, int ntot, float eps) {
COUNT(0);
// this part is actually run STRIDE times for each "z"
auto ldx = blockIdx.x * blockDim.x + threadIdx.x;
auto idx = ldx/STRIDE;
auto first = ldx - idx*STRIDE;
assert(first<STRIDE);
// usual loop uder the assumption ntot is not kown on HOST side
auto incr = (blockDim.x * gridDim.x)/STRIDE;
for (auto j = idx; j < ntot; j += incr) {
COUNT(1)
// combinatorial loop (n^2)
// in reality it should be limited using a Histogram, KDTree or similar
// here we parallelize. for each "z[j]" STRIDE threads are actually used
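    // e.g. with STRIDE = 4 the four threads that share this j have first = 0..3,
    // so they start at k = j+1, j+2, j+3, j+4 and step by STRIDE, covering
    // k = j+1 .. ntot-1 between them without overlap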
auto k = j+ 1+first;
for (;k < ntot; k +=STRIDE) {
COUNT(2);
if (
fabs(z[j]-z[k]) < eps &&
fabs(w[j]-w[k]) < eps
) {
atomicAdd(&nns[j],1);
atomicAdd(&nns[k],1);
COUNT(3);
}
} // inner loop k
} // outer loop j
}
#include <cuda/api_wrappers.h>
#include<iostream>
constexpr uint32_t NTOT = 1024*8;
template<int STRIDE>
void go(uint32_t * c_d, float const * z_d, float const * w_d, uint32_t * nss_d) {
#ifdef VERIFY
uint32_t counters[10];
hipMemset(c_d,0,10*sizeof(uint32_t));
#endif
auto nt = 64;
auto nb = 1024*STRIDE;
hipLaunchKernelGGL(( nn<STRIDE>), dim3(nb),dim3(nt), 0, 0, c_d, z_d,w_d,nss_d,NTOT,0.1f);
#ifdef VERIFY
cuda::memory::copy(counters,c_d,10*sizeof(uint32_t));
std::cout << STRIDE << ' ' << NTOT;
for (int i=0; i<5; ++i) std::cout << ' ' << counters[i];
std::cout << std::endl;
#endif
}
int main() {
if (cuda::device::count() == 0) {
std::cerr << "No CUDA devices on this system" << "\n";
exit(EXIT_FAILURE);
}
auto current_device = cuda::device::current::get();
auto z_d = cuda::memory::device::make_unique<float[]>(current_device, NTOT);
auto w_d = cuda::memory::device::make_unique<float[]>(current_device, NTOT);
auto nns_d = cuda::memory::device::make_unique<uint32_t[]>(current_device, NTOT);
auto c_d = cuda::memory::device::make_unique<uint32_t[]>(current_device, 10);
for (int i=0; i<16; ++i) {
hipMemset(nns_d.get(),0,NTOT*sizeof(uint32_t));
std::vector<float> z_h(NTOT); // for "unclear" reasons this is now zeroed...
std::mt19937 reng;
std::uniform_real_distribution<float> rgen(-1.,1.);
for (auto & z : z_h) z = rgen(reng);
cuda::memory::copy(z_d.get(),z_h.data(),sizeof(float)*z_h.size());
for (auto & z : z_h) z = rgen(reng);
cuda::memory::copy(w_d.get(),z_h.data(),sizeof(float)*z_h.size());
go<1>(c_d.get(), z_d.get(),w_d.get(),nns_d.get());
go<2>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
go<4>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
go<8>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
go<16>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
}
hipDeviceSynchronize();
return 0;
}
| 6ee247dfd80d7ea52a491cc6f0faf0d7f9b1cf6c.cu | #include<cassert>
#include<cstdint>
#include<cmath>
#include<random>
#include<vector>
// #define VERIFY
#ifdef VERIFY
#define COUNT(x) atomicAdd(&counters[x],1);
#else
__device__
void dummy(int){}
#define COUNT(x) dummy(x);
#endif
template<int STRIDE>
__global__
void nn(uint32_t * __restrict__ counters,
float const * __restrict__ z, float const * __restrict__ w, uint32_t * __restrict__ nns, int ntot, float eps) {
COUNT(0);
// this part is actually run STRIDE times for each "z"
auto ldx = blockIdx.x * blockDim.x + threadIdx.x;
auto idx = ldx/STRIDE;
auto first = ldx - idx*STRIDE;
assert(first<STRIDE);
  // usual loop under the assumption that ntot is not known on the HOST side
auto incr = (blockDim.x * gridDim.x)/STRIDE;
for (auto j = idx; j < ntot; j += incr) {
COUNT(1)
// combinatorial loop (n^2)
// in reality it should be limited using a Histogram, KDTree or similar
// here we parallelize. for each "z[j]" STRIDE threads are actually used
auto k = j+ 1+first;
for (;k < ntot; k +=STRIDE) {
COUNT(2);
if (
fabs(z[j]-z[k]) < eps &&
fabs(w[j]-w[k]) < eps
) {
atomicAdd(&nns[j],1);
atomicAdd(&nns[k],1);
COUNT(3);
}
} // inner loop k
} // outer loop j
}
#include <cuda/api_wrappers.h>
#include<iostream>
constexpr uint32_t NTOT = 1024*8;
template<int STRIDE>
void go(uint32_t * c_d, float const * z_d, float const * w_d, uint32_t * nss_d) {
#ifdef VERIFY
uint32_t counters[10];
cudaMemset(c_d,0,10*sizeof(uint32_t));
#endif
auto nt = 64;
auto nb = 1024*STRIDE;
nn<STRIDE><<<nb,nt>>>(c_d, z_d,w_d,nss_d,NTOT,0.1f);
#ifdef VERIFY
cuda::memory::copy(counters,c_d,10*sizeof(uint32_t));
std::cout << STRIDE << ' ' << NTOT;
for (int i=0; i<5; ++i) std::cout << ' ' << counters[i];
std::cout << std::endl;
#endif
}
int main() {
if (cuda::device::count() == 0) {
std::cerr << "No CUDA devices on this system" << "\n";
exit(EXIT_FAILURE);
}
auto current_device = cuda::device::current::get();
auto z_d = cuda::memory::device::make_unique<float[]>(current_device, NTOT);
auto w_d = cuda::memory::device::make_unique<float[]>(current_device, NTOT);
auto nns_d = cuda::memory::device::make_unique<uint32_t[]>(current_device, NTOT);
auto c_d = cuda::memory::device::make_unique<uint32_t[]>(current_device, 10);
for (int i=0; i<16; ++i) {
cudaMemset(nns_d.get(),0,NTOT*sizeof(uint32_t));
std::vector<float> z_h(NTOT); // for "unclear" reasons this is now zeroed...
std::mt19937 reng;
std::uniform_real_distribution<float> rgen(-1.,1.);
for (auto & z : z_h) z = rgen(reng);
cuda::memory::copy(z_d.get(),z_h.data(),sizeof(float)*z_h.size());
for (auto & z : z_h) z = rgen(reng);
cuda::memory::copy(w_d.get(),z_h.data(),sizeof(float)*z_h.size());
go<1>(c_d.get(), z_d.get(),w_d.get(),nns_d.get());
go<2>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
go<4>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
go<8>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
go<16>(c_d.get(),z_d.get(),w_d.get(),nns_d.get());
}
cudaDeviceSynchronize();
return 0;
}
|
a9a2a68d62fd63519916296d1b4f76feacf06f07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3-clause BSD License)
*
* Copyright (C) 2000-2020, Intel Corporation, all rights reserved.
* Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
* Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
* Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
* Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
* Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
* Copyright (C) 2019-2020, Xperience AI, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*
* ---------------------------------------------------------------------------
* \file dnn/src/cuda/warp_perspective/warp_perspective_cv.cu
*
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* This file has been modified by Megvii ("Megvii Modifications").
* All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
*
* ---------------------------------------------------------------------------
*/
#include "./warp_perspective_cv.cuh"
#include "src/cuda/cv/kernel_common.cuh"
#define at(A, r, c, ch) A[(r)*A##_step + (c)*CH + (ch)]
#define AB_BITS 10
#define AB_SCALE (1 << AB_BITS)
#define INTER_BITS 5
#define INTER_TAB_SIZE (1 << INTER_BITS)
#define INTER_REMAP_COEF_BITS 15
#define INTER_REMAP_COEF_SCALE (1 << INTER_REMAP_COEF_BITS)
#define ROUND_DELTA (1 << (AB_BITS - INTER_BITS - 1))
#define rep(i, n) for (int i = 0; i < (n); ++i)
#define BLOCK_THREADS_X0 64
#define BLOCK_THREADS_Y0 8
#define BLOCK_THREADS_X1 32
#define BLOCK_THREADS_Y1 8
#define PROCESS_PER_THREADS 8
namespace megdnn {
namespace cuda {
namespace warp_perspective {
//! transform matrix
__constant__ double M[9];
//! border_val
__constant__ float border_val;
using namespace megcv;
__global__ void preprocess_trans(double* trans, const float* src) {
//! The size is 9
#pragma unroll
for (size_t i = 0; i < 9; i++)
trans[i] = src[i];
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_LAN_cacheToLandVECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2];
rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5];
rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8];
}
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2];
w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w;
double fsc =
(cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w;
double fsr =
(cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = (int)lrint(fsc);
int sr = (int)lrint(fsr);
int fc = sc & (INTER_TAB_SIZE - 1);
int fr = sr & (INTER_TAB_SIZE - 1);
sc = sc >> INTER_BITS;
sr = sr >> INTER_BITS;
sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc);
sr = sr < -32768 ? -32768 : (sr > 32767 ? 32767 : sr);
const int ksize = IModeTrait<INTER_LANCZOS4>::ksize;
float coefr[ksize], coefc[ksize];
int x[ksize], y[ksize];
if (bmode == BORDER_TRANSPARENT && ((unsigned)sr >= (unsigned)src_rows ||
(unsigned)sc >= (unsigned)src_cols)) {
continue;
}
interpolate_coefs<INTER_LANCZOS4>((float)fr / INTER_TAB_SIZE, coefr);
interpolate_coefs<INTER_LANCZOS4>((float)fc / INTER_TAB_SIZE, coefc);
const BorderMode bmode1 = BModeTrait<bmode>::bmode1;
{
#pragma unroll
rep(k, ksize) {
x[k] = border_interpolate<bmode1>(
sr + k - (ksize / 2) + 1, src_rows);
}
#pragma unroll
rep(k, ksize) {
y[k] = border_interpolate<bmode1>(
sc + k - (ksize / 2) + 1, src_cols);
}
}
float sum[CH] = {0};
rep(kr, ksize) {
if (x[kr] < 0) {
#pragma unroll
rep(ch, CH) sum[ch] += coefr[kr] * border_val;
continue;
}
#pragma unroll
rep(kc, ksize) {
if (y[kc] < 0) {
#pragma unroll
rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; }
} else {
#pragma unroll
rep(ch, CH) {
sum[ch] +=
coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch);
}
}
}
}
#pragma unroll
rep(ch, CH) {
typedef typename TypeTrait<T>::WorkType WorkType;
if (dr + i < dst_rows) {
if (TypeTrait<T>::need_saturate) {
at(dst, dr + i, dc, ch) = saturate<WorkType>(
sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max());
} else {
at(dst, dr + i, dc, ch) = sum[ch];
}
}
}
}
}
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2];
rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5];
rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8];
}
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2];
w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w;
double fsc =
(cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w;
double fsr =
(cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = (int)lrint(fsc);
int sr = (int)lrint(fsr);
int fc = sc & (INTER_TAB_SIZE - 1);
int fr = sr & (INTER_TAB_SIZE - 1);
sc = sc >> INTER_BITS;
sr = sr >> INTER_BITS;
sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc);
sr = sr < -32768 ? -32768 : (sr > 32767 ? 32767 : sr);
const int ksize = IModeTrait<INTER_CUBIC>::ksize;
float coefr[ksize], coefc[ksize];
int x[ksize], y[ksize];
if (bmode == BORDER_TRANSPARENT && ((unsigned)sr >= (unsigned)src_rows ||
(unsigned)sc >= (unsigned)src_cols)) {
continue;
}
interpolate_coefs<INTER_CUBIC>((float)fr / INTER_TAB_SIZE, coefr);
interpolate_coefs<INTER_CUBIC>((float)fc / INTER_TAB_SIZE, coefc);
const BorderMode bmode1 = BModeTrait<bmode>::bmode1;
{
#pragma unroll
rep(k, ksize) {
x[k] = border_interpolate<bmode1>(
sr + k - (ksize / 2) + 1, src_rows);
}
#pragma unroll
rep(k, ksize) {
y[k] = border_interpolate<bmode1>(
sc + k - (ksize / 2) + 1, src_cols);
}
}
float sum[CH] = {0};
rep(kr, ksize) {
if (x[kr] < 0) {
#pragma unroll
rep(ch, CH) sum[ch] += coefr[kr] * border_val;
continue;
}
#pragma unroll
rep(kc, ksize) {
if (y[kc] < 0) {
#pragma unroll
rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; }
} else {
#pragma unroll
rep(ch, CH) {
sum[ch] +=
coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch);
}
}
}
}
#pragma unroll
rep(ch, CH) {
typedef typename TypeTrait<T>::WorkType WorkType;
if (dr + i < dst_rows) {
if (TypeTrait<T>::need_saturate) {
at(dst, dr + i, dc, ch) = saturate<WorkType>(
sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max());
} else {
at(dst, dr + i, dc, ch) = sum[ch];
}
}
}
}
}
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2];
rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5];
rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8];
}
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2];
w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w;
double fsc =
(cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w;
double fsr =
(cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = (int)lrint(fsc);
int sr = (int)lrint(fsr);
int fc = sc & (INTER_TAB_SIZE - 1);
int fr = sr & (INTER_TAB_SIZE - 1);
sc = sc >> INTER_BITS;
sr = sr >> INTER_BITS;
sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc);
sr = sr < -32768 ? -32768 : (sr > 32767 ? 32767 : sr);
const int ksize = IModeTrait<INTER_LINEAR>::ksize;
float coefr[ksize], coefc[ksize];
int x[ksize], y[ksize];
if (bmode == BORDER_TRANSPARENT &&
((unsigned)(sr + 1) >= (unsigned)src_rows ||
(unsigned)(sc + 1) >= (unsigned)src_cols)) {
continue;
}
interpolate_coefs<INTER_LINEAR>((float)fr / INTER_TAB_SIZE, coefr);
interpolate_coefs<INTER_LINEAR>((float)fc / INTER_TAB_SIZE, coefc);
const BorderMode bmode1 = BModeTrait<bmode>::bmode1;
{
#pragma unroll
rep(k, ksize) {
x[k] = border_interpolate<bmode1>(
sr + k - (ksize / 2) + 1, src_rows);
}
#pragma unroll
rep(k, ksize) {
y[k] = border_interpolate<bmode1>(
sc + k - (ksize / 2) + 1, src_cols);
}
}
float sum[CH] = {0};
rep(kr, ksize) {
if (x[kr] < 0) {
#pragma unroll
rep(ch, CH) sum[ch] += coefr[kr] * border_val;
continue;
}
#pragma unroll
rep(kc, ksize) {
if (y[kc] < 0) {
#pragma unroll
rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; }
} else {
#pragma unroll
rep(ch, CH) {
sum[ch] +=
coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch);
}
}
}
}
#pragma unroll
rep(ch, CH) {
typedef typename TypeTrait<T>::WorkType WorkType;
if (dr + i < dst_rows) {
if (TypeTrait<T>::need_saturate) {
at(dst, dr + i, dc, ch) = saturate<WorkType>(
sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max());
} else {
at(dst, dr + i, dc, ch) = sum[ch];
}
}
}
}
}
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_cacheToL_NEAREST(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
#define SET_DST_CH_VALUE \
if (CH == 1) { \
dst[dst_address_increase] = src[src_address_increase]; \
} else { \
dst[dst_address_increase] = src[src_address_increase]; \
dst[dst_address_increase + 1] = src[src_address_increase + 1]; \
dst[dst_address_increase + 2] = src[src_address_increase + 2]; \
}
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * blockDim.y;
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
rows_data[threadIdx.y][0] = M[1] * dr + M[2];
rows_data[threadIdx.y][1] = M[4] * dr + M[5];
rows_data[threadIdx.y][2] = M[7] * dr + M[8];
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y][2];
w = (w == 0) ? 0 : 1 / w;
double fsc = (cols_data[threadIdx.x][0] + rows_data[threadIdx.y][0]) * w;
double fsr = (cols_data[threadIdx.x][1] + rows_data[threadIdx.y][1]) * w;
int sc = saturate_cast_short(fsc);
int sr = saturate_cast_short(fsr);
size_t dst_address_increase = dr * dst_step + dc * CH;
if ((size_t)sc < src_cols && (size_t)sr < src_rows) {
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
return;
}
if (bmode == BORDER_REPLICATE) {
sr = saturate(sr, 0, (int)src_rows - 1);
sc = saturate(sc, 0, (int)src_cols - 1);
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
} else if (bmode == BORDER_CONSTANT) {
if (CH == 1) {
dst[dst_address_increase] = border_val;
} else {
dst[dst_address_increase + 0] = border_val;
dst[dst_address_increase + 1] = border_val;
dst[dst_address_increase + 2] = border_val;
}
} else if (bmode != BORDER_TRANSPARENT) {
sr = border_interpolate<bmode>(sr, src_rows);
sc = border_interpolate<bmode>(sc, src_cols);
            size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
}
}
#undef SET_DST_CH_VALUE
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_NEAREST_VECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
#define SET_DST_CH_VALUE \
if (CH == 1) { \
dst[dst_address_increase] = src[src_address_increase]; \
} else { \
dst[dst_address_increase] = src[src_address_increase]; \
dst[dst_address_increase + 1] = src[src_address_increase + 1]; \
dst[dst_address_increase + 2] = src[src_address_increase + 2]; \
}
if (dr < dst_rows && dc < dst_cols) {
double w0 = M[6] * dc + M[7] * dr + M[8];
double fc0 = M[0] * dc + M[1] * dr + M[2];
double fr0 = M[3] * dc + M[4] * dr + M[5];
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
if (dr + i >= dst_rows)
return;
//! To make the result equal to the naive version
double w = w0 + M[7] * i;
w = w ? 1. / w : 0;
double fsc = (fc0 + M[1] * i) * w;
double fsr = (fr0 + M[4] * i) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = saturate_cast_short(fsc);
int sr = saturate_cast_short(fsr);
size_t dst_address_increase = (dr + i) * dst_step + dc * CH;
if ((size_t)sc < src_cols && (size_t)sr < src_rows) {
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
continue;
}
if (bmode == BORDER_REPLICATE) {
sr = saturate(sr, 0, (int)src_rows - 1);
sc = saturate(sc, 0, (int)src_cols - 1);
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
} else if (bmode == BORDER_CONSTANT) {
if (CH == 1) {
dst[dst_address_increase] = border_val;
} else {
dst[dst_address_increase + 0] = border_val;
dst[dst_address_increase + 1] = border_val;
dst[dst_address_increase + 2] = border_val;
}
} else if (bmode != BORDER_TRANSPARENT) {
sr = border_interpolate<bmode>(sr, src_rows);
sc = border_interpolate<bmode>(sc, src_cols);
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
}
}
}
#undef SET_DST_CH_VALUE
}
template <typename T, size_t CH>
void warp_perspective_cv_proxy(
const T* src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const T bval, double* workspace, hipStream_t stream) {
hipLaunchKernelGGL(( preprocess_trans), dim3(1), dim3(1), 0, stream, workspace, trans);
cuda_check(hipStreamSynchronize(stream));
//! Copy trans to const memory
cuda_check(hipMemcpyToSymbol(
M, workspace, sizeof(double) * 9, 0, hipMemcpyHostToDevice));
//! Copy bval to const memory
cuda_check(hipMemcpyToSymbol(
border_val, &bval, sizeof(float), 0, hipMemcpyHostToDevice));
dim3 THREADS, BLOCKS;
dim3 THREADS_VECTOR, BLOCKS_VECTOR;
switch (imode) {
case INTER_NEAREST:
if (CH == 3 && sizeof(T) == sizeof(float)) {
THREADS.x = BLOCK_THREADS_X1;
THREADS.y = BLOCK_THREADS_Y1;
BLOCKS.x = DIVUP(dst_cols, THREADS.x);
BLOCKS.y = DIVUP(dst_rows, THREADS.y);
switch (bmode) {
case BORDER_REPLICATE:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_REPLICATE>)
, dim3(BLOCKS), dim3(THREADS), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_REFLECT>), dim3(BLOCKS), dim3(THREADS), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT_101:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_REFLECT_101>)
, dim3(BLOCKS), dim3(THREADS), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_WRAP:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_cacheToL_NEAREST<T, CH, BORDER_WRAP>)
, dim3(BLOCKS), dim3(THREADS), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_CONSTANT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_CONSTANT>), dim3(BLOCKS), dim3(THREADS), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_TRANSPARENT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_TRANSPARENT>)
, dim3(BLOCKS), dim3(THREADS), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
default:
break;
}
} else {
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y =
DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
switch (bmode) {
case BORDER_REPLICATE:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_REPLICATE>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_NEAREST_VECTOR<T, CH, BORDER_REFLECT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT_101:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_REFLECT_101>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_WRAP:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_NEAREST_VECTOR<T, CH, BORDER_WRAP>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_CONSTANT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_CONSTANT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_TRANSPARENT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_TRANSPARENT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
default:
break;
}
}
break;
case INTER_LINEAR:
{
{
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y =
DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
switch (bmode) {
case BORDER_REPLICATE:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_REPLICATE>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT_101:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT_101>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_WRAP:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_WRAP>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_CONSTANT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_CONSTANT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_TRANSPARENT:
if (CH == 3)
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_TRANSPARENT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
default:
break;
}
}
}
break;
case INTER_CUBIC:
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
switch (bmode) {
case BORDER_REPLICATE:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_REPLICATE>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT_101:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT_101>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_WRAP:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_WRAP>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_CONSTANT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_CONSTANT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_TRANSPARENT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_TRANSPARENT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
default:
break;
}
break;
case INTER_LANCZOS4:
{
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
switch (bmode) {
case BORDER_REPLICATE:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_REPLICATE>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_REFLECT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT_101:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_REFLECT_101>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_WRAP:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<T, CH, BORDER_WRAP>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_CONSTANT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_CONSTANT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_TRANSPARENT:
hipLaunchKernelGGL(( warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_TRANSPARENT>)
, dim3(BLOCKS_VECTOR), dim3(THREADS_VECTOR), 0, stream,
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
default:
break;
}
}
break;
default:
break;
}
}
template void warp_perspective_cv_proxy<float, 1>(
const float* src, float* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const float border_val, double* workspace,
hipStream_t stream);
template void warp_perspective_cv_proxy<uchar, 1>(
const uchar* src, uchar* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const uchar border_val, double* workspace,
hipStream_t stream);
template void warp_perspective_cv_proxy<float, 3>(
const float* src, float* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const float border_val, double* workspace,
hipStream_t stream);
template void warp_perspective_cv_proxy<uchar, 3>(
const uchar* src, uchar* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const uchar border_val, double* workspace,
hipStream_t stream);
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| a9a2a68d62fd63519916296d1b4f76feacf06f07.cu | /**
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3-clause BSD License)
*
* Copyright (C) 2000-2020, Intel Corporation, all rights reserved.
* Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
* Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
* Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
* Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
* Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
* Copyright (C) 2019-2020, Xperience AI, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*
* ---------------------------------------------------------------------------
* \file dnn/src/cuda/warp_perspective/warp_perspective_cv.cu
*
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* This file has been modified by Megvii ("Megvii Modifications").
* All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
*
* ---------------------------------------------------------------------------
*/
#include "./warp_perspective_cv.cuh"
#include "src/cuda/cv/kernel_common.cuh"
#define at(A, r, c, ch) A[(r)*A##_step + (c)*CH + (ch)]
#define AB_BITS 10
#define AB_SCALE (1 << AB_BITS)
#define INTER_BITS 5
#define INTER_TAB_SIZE (1 << INTER_BITS)
#define INTER_REMAP_COEF_BITS 15
#define INTER_REMAP_COEF_SCALE (1 << INTER_REMAP_COEF_BITS)
#define ROUND_DELTA (1 << (AB_BITS - INTER_BITS - 1))
#define rep(i, n) for (int i = 0; i < (n); ++i)
#define BLOCK_THREADS_X0 64
#define BLOCK_THREADS_Y0 8
#define BLOCK_THREADS_X1 32
#define BLOCK_THREADS_Y1 8
#define PROCESS_PER_THREADS 8
namespace megdnn {
namespace cuda {
namespace warp_perspective {
//! transform matrix
__constant__ double M[9];
//! border_val
__constant__ float border_val;
using namespace megcv;
__global__ void preprocess_trans(double* trans, const float* src) {
//! The size is 9
#pragma unroll
for (size_t i = 0; i < 9; i++)
trans[i] = src[i];
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_LAN_cacheToLandVECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2];
rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5];
rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8];
}
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2];
w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w;
double fsc =
(cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w;
double fsr =
(cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = (int)lrint(fsc);
int sr = (int)lrint(fsr);
int fc = sc & (INTER_TAB_SIZE - 1);
int fr = sr & (INTER_TAB_SIZE - 1);
sc = sc >> INTER_BITS;
sr = sr >> INTER_BITS;
sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc);
sr = sr < -32768 ? -32768 : (sr > 32767 ? 32767 : sr);
const int ksize = IModeTrait<INTER_LANCZOS4>::ksize;
float coefr[ksize], coefc[ksize];
int x[ksize], y[ksize];
if (bmode == BORDER_TRANSPARENT && ((unsigned)sr >= (unsigned)src_rows ||
(unsigned)sc >= (unsigned)src_cols)) {
continue;
}
interpolate_coefs<INTER_LANCZOS4>((float)fr / INTER_TAB_SIZE, coefr);
interpolate_coefs<INTER_LANCZOS4>((float)fc / INTER_TAB_SIZE, coefc);
const BorderMode bmode1 = BModeTrait<bmode>::bmode1;
{
#pragma unroll
rep(k, ksize) {
x[k] = border_interpolate<bmode1>(
sr + k - (ksize / 2) + 1, src_rows);
}
#pragma unroll
rep(k, ksize) {
y[k] = border_interpolate<bmode1>(
sc + k - (ksize / 2) + 1, src_cols);
}
}
float sum[CH] = {0};
rep(kr, ksize) {
if (x[kr] < 0) {
#pragma unroll
rep(ch, CH) sum[ch] += coefr[kr] * border_val;
continue;
}
#pragma unroll
rep(kc, ksize) {
if (y[kc] < 0) {
#pragma unroll
rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; }
} else {
#pragma unroll
rep(ch, CH) {
sum[ch] +=
coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch);
}
}
}
}
#pragma unroll
rep(ch, CH) {
typedef typename TypeTrait<T>::WorkType WorkType;
if (dr + i < dst_rows) {
if (TypeTrait<T>::need_saturate) {
at(dst, dr + i, dc, ch) = saturate<WorkType>(
sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max());
} else {
at(dst, dr + i, dc, ch) = sum[ch];
}
}
}
}
}
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2];
rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5];
rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8];
}
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2];
w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w;
double fsc =
(cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w;
double fsr =
(cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = (int)lrint(fsc);
int sr = (int)lrint(fsr);
int fc = sc & (INTER_TAB_SIZE - 1);
int fr = sr & (INTER_TAB_SIZE - 1);
sc = sc >> INTER_BITS;
sr = sr >> INTER_BITS;
sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc);
sr = sr < -32768 ? -32768 : (sr > 32767 ? 32767 : sr);
const int ksize = IModeTrait<INTER_CUBIC>::ksize;
float coefr[ksize], coefc[ksize];
int x[ksize], y[ksize];
if (bmode == BORDER_TRANSPARENT && ((unsigned)sr >= (unsigned)src_rows ||
(unsigned)sc >= (unsigned)src_cols)) {
continue;
}
interpolate_coefs<INTER_CUBIC>((float)fr / INTER_TAB_SIZE, coefr);
interpolate_coefs<INTER_CUBIC>((float)fc / INTER_TAB_SIZE, coefc);
const BorderMode bmode1 = BModeTrait<bmode>::bmode1;
{
#pragma unroll
rep(k, ksize) {
x[k] = border_interpolate<bmode1>(
sr + k - (ksize / 2) + 1, src_rows);
}
#pragma unroll
rep(k, ksize) {
y[k] = border_interpolate<bmode1>(
sc + k - (ksize / 2) + 1, src_cols);
}
}
float sum[CH] = {0};
rep(kr, ksize) {
if (x[kr] < 0) {
#pragma unroll
rep(ch, CH) sum[ch] += coefr[kr] * border_val;
continue;
}
#pragma unroll
rep(kc, ksize) {
if (y[kc] < 0) {
#pragma unroll
rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; }
} else {
#pragma unroll
rep(ch, CH) {
sum[ch] +=
coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch);
}
}
}
}
#pragma unroll
rep(ch, CH) {
typedef typename TypeTrait<T>::WorkType WorkType;
if (dr + i < dst_rows) {
if (TypeTrait<T>::need_saturate) {
at(dst, dr + i, dc, ch) = saturate<WorkType>(
sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max());
} else {
at(dst, dr + i, dc, ch) = sum[ch];
}
}
}
}
}
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2];
rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5];
rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8];
}
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2];
w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w;
double fsc =
(cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w;
double fsr =
(cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = (int)lrint(fsc);
int sr = (int)lrint(fsr);
int fc = sc & (INTER_TAB_SIZE - 1);
int fr = sr & (INTER_TAB_SIZE - 1);
sc = sc >> INTER_BITS;
sr = sr >> INTER_BITS;
sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc);
sr = sr < -32768 ? -32768 : (sr > 32767 ? 32767 : sr);
const int ksize = IModeTrait<INTER_LINEAR>::ksize;
float coefr[ksize], coefc[ksize];
int x[ksize], y[ksize];
if (bmode == BORDER_TRANSPARENT &&
((unsigned)(sr + 1) >= (unsigned)src_rows ||
(unsigned)(sc + 1) >= (unsigned)src_cols)) {
continue;
}
interpolate_coefs<INTER_LINEAR>((float)fr / INTER_TAB_SIZE, coefr);
interpolate_coefs<INTER_LINEAR>((float)fc / INTER_TAB_SIZE, coefc);
const BorderMode bmode1 = BModeTrait<bmode>::bmode1;
{
#pragma unroll
rep(k, ksize) {
x[k] = border_interpolate<bmode1>(
sr + k - (ksize / 2) + 1, src_rows);
}
#pragma unroll
rep(k, ksize) {
y[k] = border_interpolate<bmode1>(
sc + k - (ksize / 2) + 1, src_cols);
}
}
float sum[CH] = {0};
rep(kr, ksize) {
if (x[kr] < 0) {
#pragma unroll
rep(ch, CH) sum[ch] += coefr[kr] * border_val;
continue;
}
#pragma unroll
rep(kc, ksize) {
if (y[kc] < 0) {
#pragma unroll
rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; }
} else {
#pragma unroll
rep(ch, CH) {
sum[ch] +=
coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch);
}
}
}
}
#pragma unroll
rep(ch, CH) {
typedef typename TypeTrait<T>::WorkType WorkType;
if (dr + i < dst_rows) {
if (TypeTrait<T>::need_saturate) {
at(dst, dr + i, dc, ch) = saturate<WorkType>(
sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max());
} else {
at(dst, dr + i, dc, ch) = sum[ch];
}
}
}
}
}
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_cacheToL_NEAREST(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
#define SET_DST_CH_VALUE \
if (CH == 1) { \
dst[dst_address_increase] = src[src_address_increase]; \
} else { \
dst[dst_address_increase] = src[src_address_increase]; \
dst[dst_address_increase + 1] = src[src_address_increase + 1]; \
dst[dst_address_increase + 2] = src[src_address_increase + 2]; \
}
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * blockDim.y;
__shared__ double cols_data[BLOCK_THREADS_X1][3];
__shared__ double rows_data[BLOCK_THREADS_Y1][3];
if (dr < dst_rows && dc < dst_cols) {
if (threadIdx.y == 0) {
cols_data[threadIdx.x][0] = M[0] * dc;
cols_data[threadIdx.x][1] = M[3] * dc;
cols_data[threadIdx.x][2] = M[6] * dc;
}
if (threadIdx.x == 0) {
rows_data[threadIdx.y][0] = M[1] * dr + M[2];
rows_data[threadIdx.y][1] = M[4] * dr + M[5];
rows_data[threadIdx.y][2] = M[7] * dr + M[8];
}
}
__syncthreads();
if (dr < dst_rows && dc < dst_cols) {
double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y][2];
w = (w == 0) ? 0 : 1 / w;
double fsc = (cols_data[threadIdx.x][0] + rows_data[threadIdx.y][0]) * w;
double fsr = (cols_data[threadIdx.x][1] + rows_data[threadIdx.y][1]) * w;
int sc = saturate_cast_short(fsc);
int sr = saturate_cast_short(fsr);
size_t dst_address_increase = dr * dst_step + dc * CH;
if ((size_t)sc < src_cols && (size_t)sr < src_rows) {
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
return;
}
if (bmode == BORDER_REPLICATE) {
sr = saturate(sr, 0, (int)src_rows - 1);
sc = saturate(sc, 0, (int)src_cols - 1);
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
} else if (bmode == BORDER_CONSTANT) {
if (CH == 1) {
dst[dst_address_increase] = border_val;
} else {
dst[dst_address_increase + 0] = border_val;
dst[dst_address_increase + 1] = border_val;
dst[dst_address_increase + 2] = border_val;
}
} else if (bmode != BORDER_TRANSPARENT) {
sr = border_interpolate<bmode>(sr, src_rows);
sc = border_interpolate<bmode>(sc, src_cols);
            size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
}
}
#undef SET_DST_CH_VALUE
}
template <typename T, size_t CH, BorderMode bmode>
__global__ void warp_perspective_cv_kernel_NEAREST_VECTOR(
const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step) {
int dc = threadIdx.x + blockIdx.x * blockDim.x;
int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS);
#define SET_DST_CH_VALUE \
if (CH == 1) { \
dst[dst_address_increase] = src[src_address_increase]; \
} else { \
dst[dst_address_increase] = src[src_address_increase]; \
dst[dst_address_increase + 1] = src[src_address_increase + 1]; \
dst[dst_address_increase + 2] = src[src_address_increase + 2]; \
}
if (dr < dst_rows && dc < dst_cols) {
double w0 = M[6] * dc + M[7] * dr + M[8];
double fc0 = M[0] * dc + M[1] * dr + M[2];
double fr0 = M[3] * dc + M[4] * dr + M[5];
for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) {
if (dr + i >= dst_rows)
return;
//! To make the result equal to the naive version
double w = w0 + M[7] * i;
w = w ? 1. / w : 0;
double fsc = (fc0 + M[1] * i) * w;
double fsr = (fr0 + M[4] * i) * w;
fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX;
fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN;
fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX;
fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN;
int sc = saturate_cast_short(fsc);
int sr = saturate_cast_short(fsr);
size_t dst_address_increase = (dr + i) * dst_step + dc * CH;
if ((size_t)sc < src_cols && (size_t)sr < src_rows) {
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
continue;
}
if (bmode == BORDER_REPLICATE) {
sr = saturate(sr, 0, (int)src_rows - 1);
sc = saturate(sc, 0, (int)src_cols - 1);
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
} else if (bmode == BORDER_CONSTANT) {
if (CH == 1) {
dst[dst_address_increase] = border_val;
} else {
dst[dst_address_increase + 0] = border_val;
dst[dst_address_increase + 1] = border_val;
dst[dst_address_increase + 2] = border_val;
}
} else if (bmode != BORDER_TRANSPARENT) {
sr = border_interpolate<bmode>(sr, src_rows);
sc = border_interpolate<bmode>(sc, src_cols);
size_t src_address_increase = sr * src_step + sc * CH;
SET_DST_CH_VALUE
}
}
}
#undef SET_DST_CH_VALUE
}
template <typename T, size_t CH>
void warp_perspective_cv_proxy(
const T* src, T* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const T bval, double* workspace, cudaStream_t stream) {
preprocess_trans<<<1, 1, 0, stream>>>(workspace, trans);
cuda_check(cudaStreamSynchronize(stream));
//! Copy trans to const memory
cuda_check(cudaMemcpyToSymbol(
M, workspace, sizeof(double) * 9, 0, cudaMemcpyHostToDevice));
//! Copy bval to const memory
cuda_check(cudaMemcpyToSymbol(
border_val, &bval, sizeof(float), 0, cudaMemcpyHostToDevice));
dim3 THREADS, BLOCKS;
dim3 THREADS_VECTOR, BLOCKS_VECTOR;
switch (imode) {
case INTER_NEAREST:
if (CH == 3 && sizeof(T) == sizeof(float)) {
THREADS.x = BLOCK_THREADS_X1;
THREADS.y = BLOCK_THREADS_Y1;
BLOCKS.x = DIVUP(dst_cols, THREADS.x);
BLOCKS.y = DIVUP(dst_rows, THREADS.y);
switch (bmode) {
case BORDER_REPLICATE:
warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_REPLICATE>
<<<BLOCKS, THREADS, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT:
warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_REFLECT><<<BLOCKS, THREADS, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT_101:
warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_REFLECT_101>
<<<BLOCKS, THREADS, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_WRAP:
warp_perspective_cv_kernel_cacheToL_NEAREST<T, CH, BORDER_WRAP>
<<<BLOCKS, THREADS, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_CONSTANT:
warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_CONSTANT><<<BLOCKS, THREADS, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_TRANSPARENT:
warp_perspective_cv_kernel_cacheToL_NEAREST<
T, CH, BORDER_TRANSPARENT>
<<<BLOCKS, THREADS, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
default:
break;
}
} else {
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y =
DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
switch (bmode) {
case BORDER_REPLICATE:
warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_REPLICATE>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT:
warp_perspective_cv_kernel_NEAREST_VECTOR<T, CH, BORDER_REFLECT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT_101:
warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_REFLECT_101>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_WRAP:
warp_perspective_cv_kernel_NEAREST_VECTOR<T, CH, BORDER_WRAP>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_CONSTANT:
warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_CONSTANT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_TRANSPARENT:
warp_perspective_cv_kernel_NEAREST_VECTOR<
T, CH, BORDER_TRANSPARENT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
default:
break;
}
}
break;
case INTER_LINEAR:
{
{
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y =
DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
switch (bmode) {
case BORDER_REPLICATE:
warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_REPLICATE>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT:
warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_REFLECT_101:
warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT_101>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_WRAP:
warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_WRAP>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_CONSTANT:
warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_CONSTANT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
case BORDER_TRANSPARENT:
if (CH == 3)
warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR<
T, CH, BORDER_TRANSPARENT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows,
dst_cols, src_step, dst_step);
break;
default:
break;
}
}
}
break;
case INTER_CUBIC:
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
switch (bmode) {
case BORDER_REPLICATE:
warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_REPLICATE>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT:
warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT_101:
warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_REFLECT_101>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_WRAP:
warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_WRAP>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_CONSTANT:
warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_CONSTANT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_TRANSPARENT:
warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR<
T, CH, BORDER_TRANSPARENT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
default:
break;
}
break;
case INTER_LANCZOS4:
{
THREADS_VECTOR.x = BLOCK_THREADS_X1;
THREADS_VECTOR.y = BLOCK_THREADS_Y1;
BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x);
BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS);
switch (bmode) {
case BORDER_REPLICATE:
warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_REPLICATE>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT:
warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_REFLECT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_REFLECT_101:
warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_REFLECT_101>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_WRAP:
warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<T, CH, BORDER_WRAP>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_CONSTANT:
warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_CONSTANT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
case BORDER_TRANSPARENT:
warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<
T, CH, BORDER_TRANSPARENT>
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>(
src, dst, src_rows, src_cols, dst_rows, dst_cols,
src_step, dst_step);
break;
default:
break;
}
}
break;
default:
break;
}
}
template void warp_perspective_cv_proxy<float, 1>(
const float* src, float* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const float border_val, double* workspace,
cudaStream_t stream);
template void warp_perspective_cv_proxy<uchar, 1>(
const uchar* src, uchar* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const uchar border_val, double* workspace,
cudaStream_t stream);
template void warp_perspective_cv_proxy<float, 3>(
const float* src, float* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const float border_val, double* workspace,
cudaStream_t stream);
template void warp_perspective_cv_proxy<uchar, 3>(
const uchar* src, uchar* dst, const size_t src_rows, const size_t src_cols,
const size_t dst_rows, const size_t dst_cols, const size_t src_step,
const size_t dst_step, BorderMode bmode, InterpolationMode imode,
const float* trans, const uchar border_val, double* workspace,
cudaStream_t stream);
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
d79f80870f3099987e6e98fea6b1157149d1b48b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if ( bottom[ 0 ]->count() < 1 ) { return; }
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
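    // Gather this top blob's slice out of the bottom blob: one thread per top element.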
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( Slice<Dtype>),
        dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, bottom_data, kForward, num_slices_, slice_size_,
        bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0] || top.size() == 1) { return; }
if ( bottom[ 0 ]->count() < 1 ) { return; }
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
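    // Scatter this top blob's diff back into its slice of the bottom diff.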
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( Slice<Dtype>),
        dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, top_diff, kForward, num_slices_, slice_size_,
        bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
| d79f80870f3099987e6e98fea6b1157149d1b48b.cu | #include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if ( bottom[ 0 ]->count() < 1 ) { return; }
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0] || top.size() == 1) { return; }
if ( bottom[ 0 ]->count() < 1 ) { return; }
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
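  // With kForward == false the Slice kernel scatters each top blob's diff back
  // into the corresponding region of bottom_diff.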
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
|
f2c428284b78ec316f2902fbe6ccfdec4bea519c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/join.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/error.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <join/join_common_utils.hpp>
#include <cudf/detail/gather.cuh>
#include <join/hash_join.cuh>
#include "cudf/types.hpp"
namespace cudf {
namespace detail {
/**
* @brief Performs a left semi or anti join on the specified columns of two
* tables (left, right)
*
* The semi and anti joins only return data from the left table. A left semi join
* returns rows that exist in the right table, a left anti join returns rows
* that do not exist in the right table.
*
* The basic approach is to create a hash table containing the contents of the right
* table and then select only rows that exist (or don't exist) to be included in
* the return set.
*
* @throws cudf::logic_error if number of columns in either `left` or `right` table is 0
* @throws cudf::logic_error if number of returned columns is 0
* @throws cudf::logic_error if number of elements in `right_on` and `left_on` are not equal
*
* @param[in] left The left table
* @param[in] right The right table
* @param[in] left_on The column indices from `left` to join on.
* The column from `left` indicated by `left_on[i]`
* will be compared against the column from `right`
* indicated by `right_on[i]`.
* @param[in] right_on The column indices from `right` to join on.
* The column from `right` indicated by `right_on[i]`
* will be compared against the column from `left`
* indicated by `left_on[i]`.
* @param[in] return_columns A vector of column indices from `left` to
* include in the returned table.
* @param[in] compare_nulls Controls whether null join-key values should match or not.
* @param[in] mr Device memory resource to used to allocate the returned table's
* device memory
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @tparam join_kind Indicates whether to do LEFT_SEMI_JOIN or LEFT_ANTI_JOIN
*
* @returns Result of joining `left` and `right` tables on the columns
* specified by `left_on` and `right_on`. The resulting table
* will contain `return_columns` from `left` that match in right.
*/
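// Usage sketch for the public wrappers declared below (column indices are
// hypothetical):
//   auto joined = cudf::left_semi_join(left, right, /*left_on=*/{0},
//                                      /*right_on=*/{0}, /*return_columns=*/{0, 1},
//                                      cudf::null_equality::EQUAL, mr);
// keeps only the `left` rows whose key in column 0 also appears in `right`.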
template <join_kind JoinKind>
std::unique_ptr<cudf::table> left_semi_anti_join(
cudf::table_view const& left,
cudf::table_view const& right,
std::vector<cudf::size_type> const& left_on,
std::vector<cudf::size_type> const& right_on,
std::vector<cudf::size_type> const& return_columns,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
hipStream_t stream = 0)
{
CUDF_EXPECTS(0 != left.num_columns(), "Left table is empty");
CUDF_EXPECTS(0 != right.num_columns(), "Right table is empty");
CUDF_EXPECTS(left_on.size() == right_on.size(), "Mismatch in number of columns to be joined on");
if (0 == return_columns.size()) { return empty_like(left.select(return_columns)); }
if (is_trivial_join(left, right, left_on, right_on, JoinKind)) {
return empty_like(left.select(return_columns));
}
if ((join_kind::LEFT_ANTI_JOIN == JoinKind) && (0 == right.num_rows())) {
// Everything matches, just copy the proper columns from the left table
return std::make_unique<table>(left.select(return_columns), stream, mr);
}
// Only care about existence, so we'll use an unordered map (other joins need a multimap)
using hash_table_type = concurrent_unordered_map<cudf::size_type, bool, row_hash, row_equality>;
// Create hash table containing all keys found in right table
auto right_rows_d = table_device_view::create(right.select(right_on), stream);
size_t const hash_table_size = compute_hash_table_size(right.num_rows());
row_hash hash_build{*right_rows_d};
row_equality equality_build{*right_rows_d, *right_rows_d, compare_nulls == null_equality::EQUAL};
// Going to join it with left table
auto left_rows_d = table_device_view::create(left.select(left_on), stream);
row_hash hash_probe{*left_rows_d};
row_equality equality_probe{*left_rows_d, *right_rows_d, compare_nulls == null_equality::EQUAL};
auto hash_table_ptr = hash_table_type::create(hash_table_size,
std::numeric_limits<bool>::max(),
std::numeric_limits<cudf::size_type>::max(),
hash_build,
equality_build);
auto hash_table = *hash_table_ptr;
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
right.num_rows(),
[hash_table] __device__(size_type idx) mutable {
hash_table.insert(thrust::make_pair(idx, true));
});
//
// Now we have a hash table, we need to iterate over the rows of the left table
// and check to see if they are contained in the hash table
//
// For semi join we want contains to be true, for anti join we want contains to be false
bool join_type_boolean = (JoinKind == join_kind::LEFT_SEMI_JOIN);
rmm::device_vector<size_type> gather_map(left.num_rows());
// gather_map_end will be the end of valid data in gather_map
auto gather_map_end = thrust::copy_if(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(left.num_rows()),
gather_map.begin(),
[hash_table, join_type_boolean, hash_probe, equality_probe] __device__(size_type idx) {
auto pos = hash_table.find(idx, hash_probe, equality_probe);
return (pos != hash_table.end()) == join_type_boolean;
});
return cudf::detail::gather(
left.select(return_columns), gather_map.begin(), gather_map_end, false, mr);
}
} // namespace detail
std::unique_ptr<cudf::table> left_semi_join(cudf::table_view const& left,
cudf::table_view const& right,
std::vector<cudf::size_type> const& left_on,
std::vector<cudf::size_type> const& right_on,
std::vector<cudf::size_type> const& return_columns,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::left_semi_anti_join<detail::join_kind::LEFT_SEMI_JOIN>(
left, right, left_on, right_on, return_columns, compare_nulls, mr, 0);
}
std::unique_ptr<cudf::table> left_anti_join(cudf::table_view const& left,
cudf::table_view const& right,
std::vector<cudf::size_type> const& left_on,
std::vector<cudf::size_type> const& right_on,
std::vector<cudf::size_type> const& return_columns,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::left_semi_anti_join<detail::join_kind::LEFT_ANTI_JOIN>(
left, right, left_on, right_on, return_columns, compare_nulls, mr, 0);
}
} // namespace cudf
| f2c428284b78ec316f2902fbe6ccfdec4bea519c.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/join.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/error.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <join/join_common_utils.hpp>
#include <cudf/detail/gather.cuh>
#include <join/hash_join.cuh>
#include "cudf/types.hpp"
namespace cudf {
namespace detail {
/**
* @brief Performs a left semi or anti join on the specified columns of two
* tables (left, right)
*
* The semi and anti joins only return data from the left table. A left semi join
* returns rows that exist in the right table, a left anti join returns rows
* that do not exist in the right table.
*
* The basic approach is to create a hash table containing the contents of the right
* table and then select only rows that exist (or don't exist) to be included in
* the return set.
*
* @throws cudf::logic_error if number of columns in either `left` or `right` table is 0
* @throws cudf::logic_error if number of returned columns is 0
* @throws cudf::logic_error if number of elements in `right_on` and `left_on` are not equal
*
* @param[in] left The left table
* @param[in] right The right table
* @param[in] left_on The column indices from `left` to join on.
* The column from `left` indicated by `left_on[i]`
* will be compared against the column from `right`
* indicated by `right_on[i]`.
* @param[in] right_on The column indices from `right` to join on.
* The column from `right` indicated by `right_on[i]`
* will be compared against the column from `left`
* indicated by `left_on[i]`.
* @param[in] return_columns A vector of column indices from `left` to
* include in the returned table.
* @param[in] compare_nulls Controls whether null join-key values should match or not.
* @param[in] mr Device memory resource to used to allocate the returned table's
* device memory
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @tparam join_kind Indicates whether to do LEFT_SEMI_JOIN or LEFT_ANTI_JOIN
*
* @returns Result of joining `left` and `right` tables on the columns
* specified by `left_on` and `right_on`. The resulting table
* will contain `return_columns` from `left` that match in right.
*/
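// Usage sketch for the public wrappers declared below (column indices are
// hypothetical):
//   auto joined = cudf::left_semi_join(left, right, /*left_on=*/{0},
//                                      /*right_on=*/{0}, /*return_columns=*/{0, 1},
//                                      cudf::null_equality::EQUAL, mr);
// keeps only the `left` rows whose key in column 0 also appears in `right`.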
template <join_kind JoinKind>
std::unique_ptr<cudf::table> left_semi_anti_join(
cudf::table_view const& left,
cudf::table_view const& right,
std::vector<cudf::size_type> const& left_on,
std::vector<cudf::size_type> const& right_on,
std::vector<cudf::size_type> const& return_columns,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
cudaStream_t stream = 0)
{
CUDF_EXPECTS(0 != left.num_columns(), "Left table is empty");
CUDF_EXPECTS(0 != right.num_columns(), "Right table is empty");
CUDF_EXPECTS(left_on.size() == right_on.size(), "Mismatch in number of columns to be joined on");
if (0 == return_columns.size()) { return empty_like(left.select(return_columns)); }
if (is_trivial_join(left, right, left_on, right_on, JoinKind)) {
return empty_like(left.select(return_columns));
}
if ((join_kind::LEFT_ANTI_JOIN == JoinKind) && (0 == right.num_rows())) {
// Everything matches, just copy the proper columns from the left table
return std::make_unique<table>(left.select(return_columns), stream, mr);
}
// Only care about existence, so we'll use an unordered map (other joins need a multimap)
using hash_table_type = concurrent_unordered_map<cudf::size_type, bool, row_hash, row_equality>;
// Create hash table containing all keys found in right table
auto right_rows_d = table_device_view::create(right.select(right_on), stream);
size_t const hash_table_size = compute_hash_table_size(right.num_rows());
row_hash hash_build{*right_rows_d};
row_equality equality_build{*right_rows_d, *right_rows_d, compare_nulls == null_equality::EQUAL};
// Going to join it with left table
auto left_rows_d = table_device_view::create(left.select(left_on), stream);
row_hash hash_probe{*left_rows_d};
row_equality equality_probe{*left_rows_d, *right_rows_d, compare_nulls == null_equality::EQUAL};
auto hash_table_ptr = hash_table_type::create(hash_table_size,
std::numeric_limits<bool>::max(),
std::numeric_limits<cudf::size_type>::max(),
hash_build,
equality_build);
auto hash_table = *hash_table_ptr;
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
right.num_rows(),
[hash_table] __device__(size_type idx) mutable {
hash_table.insert(thrust::make_pair(idx, true));
});
//
// Now we have a hash table, we need to iterate over the rows of the left table
// and check to see if they are contained in the hash table
//
// For semi join we want contains to be true, for anti join we want contains to be false
bool join_type_boolean = (JoinKind == join_kind::LEFT_SEMI_JOIN);
rmm::device_vector<size_type> gather_map(left.num_rows());
// gather_map_end will be the end of valid data in gather_map
auto gather_map_end = thrust::copy_if(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(left.num_rows()),
gather_map.begin(),
[hash_table, join_type_boolean, hash_probe, equality_probe] __device__(size_type idx) {
auto pos = hash_table.find(idx, hash_probe, equality_probe);
return (pos != hash_table.end()) == join_type_boolean;
});
return cudf::detail::gather(
left.select(return_columns), gather_map.begin(), gather_map_end, false, mr);
}
} // namespace detail
std::unique_ptr<cudf::table> left_semi_join(cudf::table_view const& left,
cudf::table_view const& right,
std::vector<cudf::size_type> const& left_on,
std::vector<cudf::size_type> const& right_on,
std::vector<cudf::size_type> const& return_columns,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::left_semi_anti_join<detail::join_kind::LEFT_SEMI_JOIN>(
left, right, left_on, right_on, return_columns, compare_nulls, mr, 0);
}
std::unique_ptr<cudf::table> left_anti_join(cudf::table_view const& left,
cudf::table_view const& right,
std::vector<cudf::size_type> const& left_on,
std::vector<cudf::size_type> const& right_on,
std::vector<cudf::size_type> const& return_columns,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::left_semi_anti_join<detail::join_kind::LEFT_ANTI_JOIN>(
left, right, left_on, right_on, return_columns, compare_nulls, mr, 0);
}
} // namespace cudf
|
c384685af23e0c8039254dbfbde73afe8788e496.hip | // !!! This is a file automatically generated by hipify!!!
#include "gg.h"
#include "ggcuda.h"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include "thread_work.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
#include "moderngpu/kernel_reduce.hxx"
#include "tc_cuda.cuh"
#include "moderngpu/kernel_segsort.hxx"
#include <hip/hip_runtime_api.h>
mgpu::standard_context_t context;
#define WARP_SIZE 32
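// Counts common out-neighbours of u and v by merging their sorted adjacency
// lists; correctness relies on edge destinations being sorted per vertex
// (see sort_cuda below).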
inline __device__ unsigned long intersect(CSRGraph graph, index_type u, index_type v) {
index_type u_start = graph.getFirstEdge(u);
index_type u_end = u_start + graph.getOutDegree(u);
index_type v_start = graph.getFirstEdge(v);
index_type v_end = v_start + graph.getOutDegree(v);
unsigned long count = 0;
index_type u_it = u_start;
index_type v_it = v_start;
index_type a;
index_type b;
while (u_it < u_end && v_it < v_end) {
a = graph.getAbsDestination(u_it);
b = graph.getAbsDestination(v_it);
int d = a - b;
if (d <= 0) u_it++;
if (d >= 0) v_it++;
if (d == 0) count++;
}
return count;
}
__global__ void base(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long local_total = 0;
__shared__ hipcub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
for (index_type src = begin + tid; src < end; src += TOTAL_THREADS_1D) {
index_type row_begin = graph.getFirstEdge(src);
index_type row_end = row_begin + graph.getOutDegree(src);
for (index_type offset = row_begin; offset < row_end; ++ offset) {
index_type dst = graph.getAbsDestination(offset);
local_total = intersect(graph, dst, src);
if (local_total) num_local_triangles.reduce(local_total);
}
}
num_local_triangles.thread_exit<hipcub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
inline __device__ bool serial_search(CSRGraph graph, unsigned key, index_type begin, index_type end) {
for (index_type offset = begin; offset < end; ++ offset) {
index_type d = graph.getAbsDestination(offset);
if (d == key) return true;
if (d > key) return false;
}
return false;
}
inline __device__ bool binary_search(CSRGraph graph, index_type key, index_type begin, index_type end) {
assert(begin < end);
int l = begin;
int r = end-1;
while (r >= l) {
//assert(l<graph.nedges && r<graph.nedges);
int mid = l + (r - l) / 2;
if (mid >= graph.nedges) printf("mid=%u, l=%u, r=%u, begin=%u, end=%u, key=%u\n", mid, l, r, begin, end, key);
assert(mid < graph.nedges);
index_type value = graph.getAbsDestination(mid);
if (value == key) return true;
if (value < key) l = mid + 1;
else r = mid - 1;
}
return false;
}
__global__ void warp(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned thread_id = blockIdx.x * blockDim.x + threadIdx.x;
unsigned thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp
unsigned warp_id = thread_id / WARP_SIZE; // global warp index
unsigned num_warps = (TB_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps
__shared__ hipcub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
// each warp takes one vertex
for (index_type src = begin + warp_id; src < end; src += num_warps) {
index_type row_begin = graph.getFirstEdge(src);
index_type src_size = graph.getOutDegree(src);
index_type row_end = row_begin + src_size;
// take one edge
for (index_type offset = row_begin; offset < row_end; offset ++) {
index_type dst = graph.getAbsDestination(offset);
assert(src != dst);
index_type dst_size = graph.getOutDegree(dst);
index_type lookup = src;
index_type search = dst;
if (src_size > dst_size) {
lookup = dst;
search = src;
}
index_type lookup_begin = graph.getFirstEdge(lookup);
index_type lookup_size = graph.getOutDegree(lookup);
index_type search_size = graph.getOutDegree(search);
if (lookup_size > 0 && search_size > 0) {
for (index_type i = thread_lane; i < lookup_size; i += WARP_SIZE) {
index_type index = lookup_begin + i;
index_type key = graph.getAbsDestination(index);
index_type search_begin = graph.getFirstEdge(search);
if (binary_search(graph, key, search_begin, search_begin+search_size))
//if (serial_search(graph, key, search_begin, search_begin+search_size))
num_local_triangles.reduce(1);
}
}
}
}
num_local_triangles.thread_exit<hipcub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
void sort_cuda(struct CUDA_Context* ctx) {
mgpu::segmented_sort(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, mgpu::less_t<int>(), context);
}
void TC_cuda(unsigned __begin, unsigned __end, unsigned long & num_local_triangles, struct CUDA_Context* ctx) {
dim3 blocks;
dim3 threads;
kernel_sizing(blocks, threads);
HGAccumulator<unsigned long> _num_local_triangles;
Shared<unsigned long> num_local_trianglesval = Shared<unsigned long>(1);
*(num_local_trianglesval.cpu_wr_ptr()) = 0;
_num_local_triangles.rv = num_local_trianglesval.gpu_wr_ptr();
//mgc = mgpu::CreateCudaDevice(ctx->device);
//mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc);
//base<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles);
hipLaunchKernelGGL(( warp), dim3(blocks), dim3(TB_SIZE), 0, 0, ctx->gg, __begin, __end, _num_local_triangles);
hipDeviceSynchronize();
check_cuda_kernel;
num_local_triangles = *(num_local_trianglesval.cpu_rd_ptr());
//dump_memory_info("end", ctx->id);
hipProfilerStop();
//num_local_triangles = (unsigned)h_total;
}
void TC_masterNodes_cuda(unsigned long& num_local_triangles, struct CUDA_Context* ctx) {
TC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, num_local_triangles, ctx);
}
| c384685af23e0c8039254dbfbde73afe8788e496.cu | #include "gg.h"
#include "ggcuda.h"
#include "cub/cub.cuh"
#include "cub/util_allocator.cuh"
#include "thread_work.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
#include "moderngpu/kernel_reduce.hxx"
#include "tc_cuda.cuh"
#include "moderngpu/kernel_segsort.hxx"
#include <cuda_profiler_api.h>
mgpu::standard_context_t context;
#define WARP_SIZE 32
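// Counts common out-neighbours of u and v by merging their sorted adjacency
// lists; correctness relies on edge destinations being sorted per vertex
// (see sort_cuda below).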
inline __device__ unsigned long intersect(CSRGraph graph, index_type u, index_type v) {
index_type u_start = graph.getFirstEdge(u);
index_type u_end = u_start + graph.getOutDegree(u);
index_type v_start = graph.getFirstEdge(v);
index_type v_end = v_start + graph.getOutDegree(v);
unsigned long count = 0;
index_type u_it = u_start;
index_type v_it = v_start;
index_type a;
index_type b;
while (u_it < u_end && v_it < v_end) {
a = graph.getAbsDestination(u_it);
b = graph.getAbsDestination(v_it);
int d = a - b;
if (d <= 0) u_it++;
if (d >= 0) v_it++;
if (d == 0) count++;
}
return count;
}
__global__ void base(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long local_total = 0;
__shared__ cub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
for (index_type src = begin + tid; src < end; src += TOTAL_THREADS_1D) {
index_type row_begin = graph.getFirstEdge(src);
index_type row_end = row_begin + graph.getOutDegree(src);
for (index_type offset = row_begin; offset < row_end; ++ offset) {
index_type dst = graph.getAbsDestination(offset);
local_total = intersect(graph, dst, src);
if (local_total) num_local_triangles.reduce(local_total);
}
}
num_local_triangles.thread_exit<cub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
inline __device__ bool serial_search(CSRGraph graph, unsigned key, index_type begin, index_type end) {
for (index_type offset = begin; offset < end; ++ offset) {
index_type d = graph.getAbsDestination(offset);
if (d == key) return true;
if (d > key) return false;
}
return false;
}
inline __device__ bool binary_search(CSRGraph graph, index_type key, index_type begin, index_type end) {
assert(begin < end);
int l = begin;
int r = end-1;
while (r >= l) {
//assert(l<graph.nedges && r<graph.nedges);
int mid = l + (r - l) / 2;
if (mid >= graph.nedges) printf("mid=%u, l=%u, r=%u, begin=%u, end=%u, key=%u\n", mid, l, r, begin, end, key);
assert(mid < graph.nedges);
index_type value = graph.getAbsDestination(mid);
if (value == key) return true;
if (value < key) l = mid + 1;
else r = mid - 1;
}
return false;
}
__global__ void warp(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned thread_id = blockIdx.x * blockDim.x + threadIdx.x;
unsigned thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp
unsigned warp_id = thread_id / WARP_SIZE; // global warp index
unsigned num_warps = (TB_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps
__shared__ cub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
// each warp takes one vertex
for (index_type src = begin + warp_id; src < end; src += num_warps) {
index_type row_begin = graph.getFirstEdge(src);
index_type src_size = graph.getOutDegree(src);
index_type row_end = row_begin + src_size;
// take one edge
for (index_type offset = row_begin; offset < row_end; offset ++) {
index_type dst = graph.getAbsDestination(offset);
assert(src != dst);
index_type dst_size = graph.getOutDegree(dst);
index_type lookup = src;
index_type search = dst;
if (src_size > dst_size) {
lookup = dst;
search = src;
}
index_type lookup_begin = graph.getFirstEdge(lookup);
index_type lookup_size = graph.getOutDegree(lookup);
index_type search_size = graph.getOutDegree(search);
if (lookup_size > 0 && search_size > 0) {
for (index_type i = thread_lane; i < lookup_size; i += WARP_SIZE) {
index_type index = lookup_begin + i;
index_type key = graph.getAbsDestination(index);
index_type search_begin = graph.getFirstEdge(search);
if (binary_search(graph, key, search_begin, search_begin+search_size))
//if (serial_search(graph, key, search_begin, search_begin+search_size))
num_local_triangles.reduce(1);
}
}
}
}
num_local_triangles.thread_exit<cub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
void sort_cuda(struct CUDA_Context* ctx) {
mgpu::segmented_sort(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, mgpu::less_t<int>(), context);
}
void TC_cuda(unsigned __begin, unsigned __end, unsigned long & num_local_triangles, struct CUDA_Context* ctx) {
dim3 blocks;
dim3 threads;
kernel_sizing(blocks, threads);
HGAccumulator<unsigned long> _num_local_triangles;
Shared<unsigned long> num_local_trianglesval = Shared<unsigned long>(1);
*(num_local_trianglesval.cpu_wr_ptr()) = 0;
_num_local_triangles.rv = num_local_trianglesval.gpu_wr_ptr();
//mgc = mgpu::CreateCudaDevice(ctx->device);
//mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc);
//base<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles);
warp<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles);
cudaDeviceSynchronize();
check_cuda_kernel;
num_local_triangles = *(num_local_trianglesval.cpu_rd_ptr());
//dump_memory_info("end", ctx->id);
cudaProfilerStop();
//num_local_triangles = (unsigned)h_total;
}
void TC_masterNodes_cuda(unsigned long& num_local_triangles, struct CUDA_Context* ctx) {
TC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, num_local_triangles, ctx);
}
|
fff7798620c5b8a1d3ff4dd9f571edd0dcf93b2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>, created on 25.01.2019
//
#include <loops/special_kernels.h>
namespace nd4j {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// kernel to swap two NDArrays vals as linear sequences
// input - theSecondBuffer/Shape from input NDArray
// output - theFirstBuffer/Shape from input NDArray
template <typename T>
static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
__shared__ Nd4jLong resultLength;
__shared__ T* input;
__shared__ T* output;
if (0 == threadIdx.x) {
resultLength = shape::length(theFirstShape);
input = reinterpret_cast<T*>(theSecondBuffer);
output = reinterpret_cast<T*>(theFirstBuffer);
}
__syncthreads();
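        // grid-stride loop: swap the i-th element of the two buffers in place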
for (int i = tid; i < resultLength; i += totalThreads) {
auto xEws = shape::order(theFirstShape) == 'c'? shape::elementWiseStride(theFirstShape) :1;
auto yEws = shape::order(theSecondShape) == 'c'? shape::elementWiseStride(theSecondShape):1;
auto xOffset = shape::getIndexOffset(i * xEws, theFirstShape);
auto yOffset = shape::getIndexOffset(i * yEws, theSecondShape);
nd4j::math::nd4j_swap(output[xOffset], input[yOffset]);
}
}
BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape), LIBND4J_TYPES);
template <typename T>
void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, hipStream_t* theStream) {
hipLaunchKernelGGL(( swapUnsafeKernel<T>), dim3(256), dim3(512), 8192, *theStream, theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape);
}
BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, hipStream_t* theStream), LIBND4J_TYPES);
} | fff7798620c5b8a1d3ff4dd9f571edd0dcf93b2c.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>, created on 25.01.2019
//
#include <loops/special_kernels.h>
namespace nd4j {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// kernel to swap two NDArrays vals as linear sequences
// input - theSecondBuffer/Shape from input NDArray
// output - theFirstBuffer/Shape from input NDArray
template <typename T>
static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
__shared__ Nd4jLong resultLength;
__shared__ T* input;
__shared__ T* output;
if (0 == threadIdx.x) {
resultLength = shape::length(theFirstShape);
input = reinterpret_cast<T*>(theSecondBuffer);
output = reinterpret_cast<T*>(theFirstBuffer);
}
__syncthreads();
for (int i = tid; i < resultLength; i += totalThreads) {
auto xEws = shape::order(theFirstShape) == 'c'? shape::elementWiseStride(theFirstShape) :1;
auto yEws = shape::order(theSecondShape) == 'c'? shape::elementWiseStride(theSecondShape):1;
auto xOffset = shape::getIndexOffset(i * xEws, theFirstShape);
auto yOffset = shape::getIndexOffset(i * yEws, theSecondShape);
nd4j::math::nd4j_swap(output[xOffset], input[yOffset]);
}
}
BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape), LIBND4J_TYPES);
template <typename T>
void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, cudaStream_t* theStream) {
swapUnsafeKernel<T><<<256, 512, 8192, *theStream>>>(theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape);
}
BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong* theFirstShape, void* theSecondBuffer, Nd4jLong* theSecondShape, cudaStream_t* theStream), LIBND4J_TYPES);
} |
7d506eb7a5c2d2d86f2150a078905693a813ccd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<assert.h>
#include<math.h>
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }
// very simple test kernel
extern "C"
__global__ void identity(int *size, const int *input, int *output) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
output[ix] = input[ix];
}
}
extern "C"
// very simple test kernel for int array
__global__ void intArrayIdentity(int *size, const int *input, int *output, int *length) {
const int ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i];
}
}
}
extern "C"
// very simple test kernel for IntDataPoint class
__global__ void IntDataPointIdentity(int *size, const int *inputX, const int *inputY, int *outputX, int *outputY, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &inputX[ix* *length];
int *outArrayBody = &outputX[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i];
}
// copy int scalar value
outputY[ix] = inputY[ix];
}
}
extern "C"
// very simple test kernel for int array with free var
__global__ void intArrayAdd(int *size, const int *input, int *output, const int *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// test kernel for multiple input columns
__global__ void vectorLength(int *size, const double *x, const double *y, double *len) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
}
}
extern "C"
// test kernel for multiple input and multiple output columns, with different types
__global__ void plusMinus(int *size, const double *base, const float *deviation, double *a, float *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
a[ix] = base[ix] - deviation[ix];
b[ix] = base[ix] + deviation[ix];
}
}
extern "C"
// test kernel for two const arguments
__global__ void applyLinearFunction(int *size, const short *x, short *y, short *a, short *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
y[ix] = *a + *b * x[ix];
}
}
extern "C"
// test kernel for custom number of blocks + const argument
// manual SIMD, to be ran on size / 8 threads, assumes size % 8 == 0
// note that key is reversed, since it's little endian
__global__ void blockXOR(int *size, const char *input, char *output, long *key) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix * 8 < *size) {
((long *)output)[ix] = ((const long *)input)[ix] ^ *key;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2_self(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
in[ix] = out[ix];
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void sum(int *size, int *input, int *output, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < *size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
int result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void intArraySum(int *size, const int *input, int *output, int *length, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int *accArrayBody = const_cast<int *>(&input[ix* *length]);
for (long i = ix + jump; i < *size; i += jump) {
const int *inArrayBody = &input[(ix* *length) + i];
for (long j = 0; j < *length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (*size < jump) ? *size : (long)jump;
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < count; i++) {
const int *inArrayBody = &input[(i* *length)];
if (i == 0) {
for (long j = 0; j < *length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < *length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
extern "C"
// map for DataPoint class
__global__ void DataPointMap(int *size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const double *inArrayBody = &inputX[ix* *length];
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// reduce for DataPoint class
__global__ void DataPointReduce(int *size, const double *input, double *output, int *length, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
double *accArrayBody = const_cast<double *>(&input[ix* *length]);
for (long i = ix + jump; i < *size; i += jump) {
const double *inArrayBody = &input[(ix* *length) + i];
for (long j = 0; j < *length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < count; i++) {
const double *inArrayBody = &input[(i* *length)];
if (i == 0) {
for (long j = 0; j < *length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < *length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
// map for Logistic regression
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
double ans = 0.0;
for(int i = 0; i < n; i++) {
ans += x[i] * y[i];
}
return ans;
}
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
for(int i = 0; i < n; i++) {
result[i] = x[i] * c;
}
}
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}
#define WARPSIZE 32
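// Software double-precision atomic add: retry atomicCAS on the 64-bit bit
// pattern until no other thread has updated the address in between.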
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#if (__CUDA_ARCH__ >= 300)
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernel(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
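// Column-wise sum of an n x length row-major array into outputArrayBody:
// four columns are reduced per pass via double4, and the remainder falls back
// to deviceReduceKernel one column at a time.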
__device__ void deviceReduceArrayKernel(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernel(inArray, &outputArrayBody[i], i, n, length);
}
}
#endif
extern "C"
__global__
void blockReduce(int *count, double *data, double * result, int *user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < *count)
deviceReduceArrayKernel(data, result, *user_D, *count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
mapAll(int *count, double *x, double *y, double *result, double *w, int *user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < *count)
map(&result[idx * *user_D], &x[idx * *user_D ], y[idx],w, *user_D);
}
| 7d506eb7a5c2d2d86f2150a078905693a813ccd0.cu | #include<assert.h>
#include<math.h>
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }
// very simple test kernel
extern "C"
__global__ void identity(int *size, const int *input, int *output) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
output[ix] = input[ix];
}
}
extern "C"
// very simple test kernel for int array
__global__ void intArrayIdentity(int *size, const int *input, int *output, int *length) {
const int ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i];
}
}
}
extern "C"
// very simple test kernel for IntDataPoint class
__global__ void IntDataPointIdentity(int *size, const int *inputX, const int *inputY, int *outputX, int *outputY, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &inputX[ix* *length];
int *outArrayBody = &outputX[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i];
}
// copy int scalar value
outputY[ix] = inputY[ix];
}
}
extern "C"
// very simple test kernel for int array with free var
__global__ void intArrayAdd(int *size, const int *input, int *output, const int *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// test kernel for multiple input columns
__global__ void vectorLength(int *size, const double *x, const double *y, double *len) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
}
}
extern "C"
// test kernel for multiple input and multiple output columns, with different types
__global__ void plusMinus(int *size, const double *base, const float *deviation, double *a, float *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
a[ix] = base[ix] - deviation[ix];
b[ix] = base[ix] + deviation[ix];
}
}
extern "C"
// test kernel for two const arguments
__global__ void applyLinearFunction(int *size, const short *x, short *y, short *a, short *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
y[ix] = *a + *b * x[ix];
}
}
extern "C"
// test kernel for custom number of blocks + const argument
// manual SIMD, to be ran on size / 8 threads, assumes size % 8 == 0
// note that key is reversed, since it's little endian
__global__ void blockXOR(int *size, const char *input, char *output, long *key) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix * 8 < *size) {
((long *)output)[ix] = ((const long *)input)[ix] ^ *key;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2_self(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
in[ix] = out[ix];
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void sum(int *size, int *input, int *output, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < *size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
int result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void intArraySum(int *size, const int *input, int *output, int *length, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int *accArrayBody = const_cast<int *>(&input[ix* *length]);
for (long i = ix + jump; i < *size; i += jump) {
const int *inArrayBody = &input[(ix* *length) + i];
for (long j = 0; j < *length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (*size < jump) ? *size : (long)jump;
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < count; i++) {
const int *inArrayBody = &input[(i* *length)];
if (i == 0) {
for (long j = 0; j < *length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < *length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
extern "C"
// map for DataPoint class
__global__ void DataPointMap(int *size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const double *inArrayBody = &inputX[ix* *length];
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// reduce for DataPoint class
__global__ void DataPointReduce(int *size, const double *input, double *output, int *length, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
double *accArrayBody = const_cast<double *>(&input[ix* *length]);
for (long i = ix + jump; i < *size; i += jump) {
const double *inArrayBody = &input[(ix* *length) + i];
for (long j = 0; j < *length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < count; i++) {
const double *inArrayBody = &input[(i* *length)];
if (i == 0) {
for (long j = 0; j < *length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < *length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
// map for Logistic regression
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
double ans = 0.0;
for(int i = 0; i < n; i++) {
ans += x[i] * y[i];
}
return ans;
}
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
for(int i = 0; i < n; i++) {
result[i] = x[i] * c;
}
}
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}
#define WARPSIZE 32
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#if (__CUDA_ARCH__ >= 300)
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernel(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
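// Column-wise sum of an n x length row-major array into outputArrayBody:
// four columns are reduced per pass via double4, and the remainder falls back
// to deviceReduceKernel one column at a time.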
__device__ void deviceReduceArrayKernel(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernel(inArray, &outputArrayBody[i], i, n, length);
}
}
#endif
extern "C"
__global__
void blockReduce(int *count, double *data, double * result, int *user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < *count)
deviceReduceArrayKernel(data, result, *user_D, *count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
mapAll(int *count, double *x, double *y, double *result, double *w, int *user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < *count)
map(&result[idx * *user_D], &x[idx * *user_D ], y[idx],w, *user_D);
}
|
4705462b4a7aa9adf2267eab8b26bd2241e97376.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define size 21 // Size of the matrix
// Prints the grid to the terminal
__host__ void print(bool** grid){
std::cout << "\n\n\n\n\n";
for(unsigned int i = 1; i < size-1; i++) {
for(unsigned int j = 1; j < size-1; j++)
std::cout << (grid[i][j]?"#":"_");
std::cout << std::endl;
}
}
// Prints the grid to the terminal (device version)
__device__ void d_print(bool** grid){
printf("\n\n\n\n\n");
for(unsigned int i = 1; i < size-1; i++) {
for(unsigned int j = 1; j < size-1; j++)
if(grid[i][j]==true){
printf("1");
}else{
printf("0");
}
printf("\n");
}
}
__host__ bool someoneAlive(bool** grid){
for(unsigned int i=0; i < size; i++)
for(unsigned int j=0; j < size; j++)
if(grid[i][j]==true) return true;
return false;
}
// Computes one step of the simulation
__global__ void jogo(bool** grid){
int m=blockIdx.x*blockDim.x+threadIdx.x;
int n=blockIdx.y*blockDim.y+threadIdx.y;
    if (m>=1 && m<size-1 && n>=1 && n<size-1){ // interior cells only; the border stays dead
// printf("m: %d n: %d\n",m,n);
// bool isAlive = false;
bool grid_tmp[size][size] = {};
for(unsigned int i=0; i < size; i++){
for(unsigned int j=0; j < size; j++){
grid_tmp[i][j] = grid[i][j];
// printf("%d",grid[i][j]);
}
// printf("\n");
}
// for(unsigned int i = 1; i < size-1; i++)
// for(unsigned int j = 1; j < size-1; j++) {
unsigned int count = 0;
// if(grid[i][j]) isAlive = true;
for(int k = -1; k <= 1; k++)
for(int l = -1; l <= 1; l++)
if(k != 0 || l != 0)
if(grid_tmp[m+k][n+l])
++count;
if(count < 2 || count > 3){
grid[m][n] = false;
// printf("m: %d n: %d MORREU\n",m,n);
// printf("count = %d\n", count);
}
else {
if(count == 3){
grid[m][n] = true;
// printf("m: %d n: %d REVIVEU\n",m,n);
}
}
// }
}
// return isAlive;
return;
}
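// The update above follows the standard Conway rules: a cell with fewer than 2 or
// more than 3 live neighbours dies, a cell with exactly 3 neighbours becomes (or
// stays) alive, and a cell with exactly 2 keeps its previous state. Each thread
// counts neighbours in its own snapshot grid_tmp taken at kernel entry; note that
// blocks are not synchronised with each other, so that snapshot can already
// contain updates written by threads of earlier-running blocks.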
int main(){
// bool grid[size][size] = {}; // initial data
bool** grid = (bool**)malloc(size*sizeof(bool*));
for(int i=0; i<size; i++) grid[i] = (bool*)malloc(size*sizeof(bool));
for(unsigned int i=0; i < size; i++)
for(unsigned int j=0; j < size; j++)
grid[i][j] = false;
grid[ 5][ 7] = true;
grid[ 6][ 8] = true;
grid[ 8][ 8] = true;
grid[ 6][ 9] = true;
grid[ 8][10] = true;
grid[ 9][10] = true;
grid[ 8][11] = true;
grid[10][11] = true;
grid[10][12] = true;
bool** d_grid;
bool* d_rows[size]; // host-side array that holds the device row pointers
int mem_size = size*sizeof(void*);
hipMalloc((void **) &d_grid, mem_size);
printf("got here\n");
// allocate each row on the device, then publish the row pointers to d_grid
for(int i=0; i<size; i++) hipMalloc((void **) &d_rows[i], size*sizeof(bool));
hipMemcpy(d_grid, d_rows, mem_size, hipMemcpyHostToDevice);
int nthreads = 7;
dim3 blocks(size/nthreads+1,size/nthreads+1);
dim3 threads(nthreads,nthreads);
while(someoneAlive(grid)){
for(int i=0; i<size; i++) hipMemcpy(d_rows[i], grid[i], size*sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( jogo), dim3(blocks),dim3(threads), 0, 0, d_grid);
hipDeviceSynchronize();
for(int i=0; i<size; i++) hipMemcpy(grid[i], d_rows[i], size*sizeof(bool), hipMemcpyDeviceToHost);
print(grid);
usleep(100000);
return 0;
}
// while (continua) { // loop while something is still alive
// continua = jogo(grid)
// print(grid);
// usleep(100000); // pause so the frame can be seen in the terminal
// }
}
| 4705462b4a7aa9adf2267eab8b26bd2241e97376.cu | #include <iostream>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define size 21 // Matrix size
// Display the points on screen
__host__ void print(bool** grid){
std::cout << "\n\n\n\n\n";
for(unsigned int i = 1; i < size-1; i++) {
for(unsigned int j = 1; j < size-1; j++)
std::cout << (grid[i][j]?"#":"_");
std::cout << std::endl;
}
}
// Display the points on screen
__device__ void d_print(bool** grid){
printf("\n\n\n\n\n");
for(unsigned int i = 1; i < size-1; i++) {
for(unsigned int j = 1; j < size-1; j++)
if(grid[i][j]==true){
printf("1");
}else{
printf("0");
}
printf("\n");
}
}
__host__ bool someoneAlive(bool** grid){
for(unsigned int i=0; i < size; i++)
for(unsigned int j=0; j < size; j++)
if(grid[i][j]==true) return true;
return false;
}
// Compute the simulation step
__global__ void jogo(bool** grid){
int m=blockIdx.x*blockDim.x+threadIdx.x;
int n=blockIdx.y*blockDim.y+threadIdx.y;
if (m>0 && m<size-1 && n>0 && n<size-1){ // update interior cells only; the border row/column stays fixed
// printf("m: %d n: %d\n",m,n);
// bool isAlive = false;
bool grid_tmp[size][size] = {};
for(unsigned int i=0; i < size; i++){
for(unsigned int j=0; j < size; j++){
grid_tmp[i][j] = grid[i][j];
// printf("%d",grid[i][j]);
}
// printf("\n");
}
// for(unsigned int i = 1; i < size-1; i++)
// for(unsigned int j = 1; j < size-1; j++) {
unsigned int count = 0;
// if(grid[i][j]) isAlive = true;
for(int k = -1; k <= 1; k++)
for(int l = -1; l <= 1; l++)
if(k != 0 || l != 0)
if(grid_tmp[m+k][n+l])
++count;
if(count < 2 || count > 3){
grid[m][n] = false;
// printf("m: %d n: %d MORREU\n",m,n);
// printf("count = %d\n", count);
}
else {
if(count == 3){
grid[m][n] = true;
// printf("m: %d n: %d REVIVEU\n",m,n);
}
}
// }
}
// return isAlive;
return;
}
int main(){
// bool grid[size][size] = {}; // initial data
bool** grid = (bool**)malloc(size*sizeof(bool*));
for(int i=0; i<size; i++) grid[i] = (bool*)malloc(size*sizeof(bool));
for(unsigned int i=0; i < size; i++)
for(unsigned int j=0; j < size; j++)
grid[i][j] = false;
grid[ 5][ 7] = true;
grid[ 6][ 8] = true;
grid[ 8][ 8] = true;
grid[ 6][ 9] = true;
grid[ 8][10] = true;
grid[ 9][10] = true;
grid[ 8][11] = true;
grid[10][11] = true;
grid[10][12] = true;
bool** d_grid;
bool* d_rows[size]; // host-side array that holds the device row pointers
int mem_size = size*sizeof(void*);
cudaMalloc((void **) &d_grid, mem_size);
printf("got here\n");
// allocate each row on the device, then publish the row pointers to d_grid
for(int i=0; i<size; i++) cudaMalloc((void **) &d_rows[i], size*sizeof(bool));
cudaMemcpy(d_grid, d_rows, mem_size, cudaMemcpyHostToDevice);
int nthreads = 7;
dim3 blocks(size/nthreads+1,size/nthreads+1);
dim3 threads(nthreads,nthreads);
while(someoneAlive(grid)){
for(int i=0; i<size; i++) cudaMemcpy(d_rows[i], grid[i], size*sizeof(bool), cudaMemcpyHostToDevice);
jogo<<<blocks,threads>>>(d_grid);
cudaDeviceSynchronize();
for(int i=0; i<size; i++) cudaMemcpy(grid[i], d_rows[i], size*sizeof(bool), cudaMemcpyDeviceToHost);
print(grid);
usleep(100000);
return 0;
}
// while (continua) { // loop while something is still alive
// continua = jogo(grid)
// print(grid);
// usleep(100000); // pause so the frame can be seen in the terminal
// }
}
|
341174a8f934cd3048ecb7e14fcdd242022a86b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
__constant__ unsigned int shift1[4] = {6, 2, 13, 3};
__constant__ unsigned int shift2[4] = {13, 27, 21, 12};
__constant__ unsigned int shift3[4] = {18, 2, 7, 13};
__constant__ unsigned int offset[4] = {4294967294, 4294967288, 4294967280, 4294967168};
__shared__ unsigned int randStates[32];
__device__ unsigned int TausStep(unsigned int &z, int S1, int S2, int S3, unsigned int M) {
unsigned int b = (((z << S1) ^ z) >> S2);
return z = (((z &M) << S3) ^ b);
}
__device__ unsigned int randInt() {
TausStep(randStates[threadIdx.x&31], shift1[threadIdx.x&3], shift2[threadIdx.x&3],shift3[threadIdx.x&3],offset[threadIdx.x&3]);
return (randStates[(threadIdx.x)&31]^randStates[(threadIdx.x+1)&31]^randStates[(threadIdx.x+2)&31]^randStates[(threadIdx.x+3)&31]);
}
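// TausStep is one component step of a combined Tausworthe generator: it shifts and
// masks a 32-bit state word in place. randInt advances the calling thread's state
// and then XORs four neighbouring entries of randStates together to form the
// output. In this test the generator is only used as busy-work to keep thread 0
// occupied before it releases the other threads at the __syncthreads() in
// sync_test below.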
__global__ void sync_test(void) {
__shared__ int shared_int;
int count = 0;
long long timeout = 0;
if (threadIdx.x == 0) {
shared_int = 0;
}
__syncthreads();
if (threadIdx.x == 0) {
// occupy thread0
while (count < 100) {
for (int i=0; i<200; i++){
randInt();
}
if (++timeout > 1000000) {
break;
}
count++;
if (count > 50) {
count = 0;
}
}
shared_int = 1;
}
__syncthreads();
printf("%d\n", shared_int);
}
int main(void)
{
hipLaunchKernelGGL(( sync_test), dim3(1), dim3(4), 0, 0, );
hipDeviceSynchronize();
return 0;
}
/* prints:
1
1
1
1
*/
| 341174a8f934cd3048ecb7e14fcdd242022a86b0.cu | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
__constant__ unsigned int shift1[4] = {6, 2, 13, 3};
__constant__ unsigned int shift2[4] = {13, 27, 21, 12};
__constant__ unsigned int shift3[4] = {18, 2, 7, 13};
__constant__ unsigned int offset[4] = {4294967294, 4294967288, 4294967280, 4294967168};
__shared__ unsigned int randStates[32];
__device__ unsigned int TausStep(unsigned int &z, int S1, int S2, int S3, unsigned int M) {
unsigned int b = (((z << S1) ^ z) >> S2);
return z = (((z &M) << S3) ^ b);
}
__device__ unsigned int randInt() {
TausStep(randStates[threadIdx.x&31], shift1[threadIdx.x&3], shift2[threadIdx.x&3],shift3[threadIdx.x&3],offset[threadIdx.x&3]);
return (randStates[(threadIdx.x)&31]^randStates[(threadIdx.x+1)&31]^randStates[(threadIdx.x+2)&31]^randStates[(threadIdx.x+3)&31]);
}
__global__ void sync_test(void) {
__shared__ int shared_int;
int count = 0;
long long timeout = 0;
if (threadIdx.x == 0) {
shared_int = 0;
}
__syncthreads();
if (threadIdx.x == 0) {
// occupy thread0
while (count < 100) {
for (int i=0; i<200; i++){
randInt();
}
if (++timeout > 1000000) {
break;
}
count++;
if (count > 50) {
count = 0;
}
}
shared_int = 1;
}
__syncthreads();
printf("%d\n", shared_int);
}
int main(void)
{
sync_test<<<1, 4>>>();
cudaDeviceSynchronize();
return 0;
}
/* prints:
1
1
1
1
*/
|
69a8c332cc932fe077413559392f4cfe1951036d.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorCopy.hip"
#else
void THCTensor_(copy)(THCState* state, THCTensor* dst, THCTensor* src) {
if (dst == src) return;
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor src_wrap = THTensor_wrap(src);
at::native::copy_(dst_wrap, src_wrap);
}
template <>
THCTensor *THCTensor_newClone<scalar_t>(THCState *state, THCTensor *self) {
THCTensor* tensor =
// THCTensor_new(state, THTensor_getStoragePtr(self)->dtype());
THCTensor_new(state, self->dtype());
THCTensor_resizeAs(state, tensor, self);
at::Tensor tensor_wrap = THTensor_wrap(tensor);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(tensor_wrap, self_wrap);
return tensor;
}
template <>
THCTensor *THCTensor_newContiguous<scalar_t>(THCState *state, THCTensor *self)
{
if(!self->is_contiguous()) {
return THCTensor_newClone<scalar_t>(state, self);
} else {
THCTensor_retain(state, self);
return self;
}
}
template <>
void THCTensor_freeCopyTo<scalar_t>(THCState *state, THCTensor *self, THCTensor *dst) {
if(self != dst) {
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(dst_wrap, self_wrap);
}
THCTensor_free(state, self);
}
#endif
| 69a8c332cc932fe077413559392f4cfe1951036d.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorCopy.cu"
#else
void THCTensor_(copy)(THCState* state, THCTensor* dst, THCTensor* src) {
if (dst == src) return;
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor src_wrap = THTensor_wrap(src);
at::native::copy_(dst_wrap, src_wrap);
}
template <>
THCTensor *THCTensor_newClone<scalar_t>(THCState *state, THCTensor *self) {
THCTensor* tensor =
// THCTensor_new(state, THTensor_getStoragePtr(self)->dtype());
THCTensor_new(state, self->dtype());
THCTensor_resizeAs(state, tensor, self);
at::Tensor tensor_wrap = THTensor_wrap(tensor);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(tensor_wrap, self_wrap);
return tensor;
}
template <>
THCTensor *THCTensor_newContiguous<scalar_t>(THCState *state, THCTensor *self)
{
if(!self->is_contiguous()) {
return THCTensor_newClone<scalar_t>(state, self);
} else {
THCTensor_retain(state, self);
return self;
}
}
template <>
void THCTensor_freeCopyTo<scalar_t>(THCState *state, THCTensor *self, THCTensor *dst) {
if(self != dst) {
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(dst_wrap, self_wrap);
}
THCTensor_free(state, self);
}
#endif
|
5c73d5dcdd8fa60111bd09a00d31658589b8b1e9.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************
streamcluster_cuda_header.cu
: header file to streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Sang-Ha (a.k.a Shawn) Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#ifndef STREAMCLUSTER_CUDA_HEADER_CU
#define STREAMCLUSTER_CUDA_HEADER_CU
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <sys/resource.h>
#include <limits.h>
#include <hip/hip_runtime.h>
#include<vector_functions.h>
#include "newhalf.hpp"
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
using namespace std;
/* this structure represents a point */
/* these will be passed around to avoid copying coordinates */
typedef struct {
float weight;
float *coord;
long assign; /* number of point where this one is assigned */
float cost; /* cost of that assignment, weight*distance */
} Point;
typedef struct {
uint32_t weight;
float *coord;
long2 assign; /* number of point where this one is assigned */
uint32_t cost; /* cost of that assignment, weight*distance */
} Point_half2;
/* this is the array of points */
typedef struct {
long num; /* number of points; may not be N if this is a sample */
int dim; /* dimensionality */
Point *p; /* the array itself */
} Points;
struct pkmedian_arg_t
{
Points* points;
long kmin;
long kmax;
long* kfinal;
int pid;
pthread_barrier_t* barrier;
};
class PStream {
public:
virtual size_t read( float* dest, int dim, int num ) = 0;
virtual int ferror() = 0;
virtual int feof() = 0;
virtual ~PStream() {
}
};
//synthetic stream
class SimStream : public PStream {
public:
SimStream(long n_ ) {
n = n_;
}
size_t read( float* dest, int dim, int num ) {
size_t count = 0;
for( int i = 0; i < num && n > 0; i++ ) {
for( int k = 0; k < dim; k++ ) {
dest[i*dim + k] = lrand48()/(float)INT_MAX;
}
n--;
count++;
}
return count;
}
int ferror() {
return 0;
}
int feof() {
return n <= 0;
}
~SimStream() {
}
private:
long n;
};
class FileStream : public PStream {
public:
FileStream(char* filename) {
fp = fopen( filename, "rb");
if( fp == NULL ) {
fprintf(stderr,"error opening file %s\n.",filename);
exit(1);
}
}
size_t read( float* dest, int dim, int num ) {
return std::fread(dest, sizeof(float)*dim, num, fp);
}
int ferror() {
return std::ferror(fp);
}
int feof() {
return std::feof(fp);
}
~FileStream() {
printf("closing file stream\n");
fclose(fp);
}
private:
FILE* fp;
};
/* function prototypes */
double gettime();
int isIdentical(float*, float*, int);
//static int floatcomp(const void*, const void*);
void shuffle(Points*);
void intshuffle(int*, int);
float waste(float);
float dist(Point, Point, int);
float pspeedy(Points*, float, long, int, pthread_barrier_t*);
float pgain_old(long, Points*, float, long int*, int, pthread_barrier_t*);
float pFL(Points*, int*, int, float, long*, float, long, float, int, pthread_barrier_t*);
int selectfeasible_fast(Points*, int**, int, int, pthread_barrier_t*);
float pkmedian(Points*, long, long, long*, int, pthread_barrier_t*);
int contcenters(Points*);
void copycenters(Points*, Points*, long*, long);
void* localSearchSub(void*);
void localSearch(Points*, long, long, long*);
void outcenterIDs(Points*, long*, char*);
void streamCluster(PStream*, long, long, int, long, long, char*);
float pgain(long, Points*, float, long int*, int, bool*, int*, bool*, bool, double*, double*, double*, double*, double*, double*);
void allocDevMem(int, int, int);
void allocHostMem(int, int, int);
void freeDevMem();
void freeHostMem();
#endif
| 5c73d5dcdd8fa60111bd09a00d31658589b8b1e9.cu | /************************************************
streamcluster_cuda_header.cu
: header file to streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Sang-Ha (a.k.a Shawn) Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#ifndef STREAMCLUSTER_CUDA_HEADER_CU
#define STREAMCLUSTER_CUDA_HEADER_CU
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <sys/resource.h>
#include <limits.h>
#include <cuda.h>
#include<vector_functions.h>
#include "newhalf.hpp"
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
using namespace std;
/* this structure represents a point */
/* these will be passed around to avoid copying coordinates */
typedef struct {
float weight;
float *coord;
long assign; /* number of point where this one is assigned */
float cost; /* cost of that assignment, weight*distance */
} Point;
typedef struct {
uint32_t weight;
float *coord;
long2 assign; /* number of point where this one is assigned */
uint32_t cost; /* cost of that assignment, weight*distance */
} Point_half2;
/* this is the array of points */
typedef struct {
long num; /* number of points; may not be N if this is a sample */
int dim; /* dimensionality */
Point *p; /* the array itself */
} Points;
struct pkmedian_arg_t
{
Points* points;
long kmin;
long kmax;
long* kfinal;
int pid;
pthread_barrier_t* barrier;
};
class PStream {
public:
virtual size_t read( float* dest, int dim, int num ) = 0;
virtual int ferror() = 0;
virtual int feof() = 0;
virtual ~PStream() {
}
};
//synthetic stream
class SimStream : public PStream {
public:
SimStream(long n_ ) {
n = n_;
}
size_t read( float* dest, int dim, int num ) {
size_t count = 0;
for( int i = 0; i < num && n > 0; i++ ) {
for( int k = 0; k < dim; k++ ) {
dest[i*dim + k] = lrand48()/(float)INT_MAX;
}
n--;
count++;
}
return count;
}
int ferror() {
return 0;
}
int feof() {
return n <= 0;
}
~SimStream() {
}
private:
long n;
};
class FileStream : public PStream {
public:
FileStream(char* filename) {
fp = fopen( filename, "rb");
if( fp == NULL ) {
fprintf(stderr,"error opening file %s\n.",filename);
exit(1);
}
}
size_t read( float* dest, int dim, int num ) {
return std::fread(dest, sizeof(float)*dim, num, fp);
}
int ferror() {
return std::ferror(fp);
}
int feof() {
return std::feof(fp);
}
~FileStream() {
printf("closing file stream\n");
fclose(fp);
}
private:
FILE* fp;
};
/* function prototypes */
double gettime();
int isIdentical(float*, float*, int);
//static int floatcomp(const void*, const void*);
void shuffle(Points*);
void intshuffle(int*, int);
float waste(float);
float dist(Point, Point, int);
float pspeedy(Points*, float, long, int, pthread_barrier_t*);
float pgain_old(long, Points*, float, long int*, int, pthread_barrier_t*);
float pFL(Points*, int*, int, float, long*, float, long, float, int, pthread_barrier_t*);
int selectfeasible_fast(Points*, int**, int, int, pthread_barrier_t*);
float pkmedian(Points*, long, long, long*, int, pthread_barrier_t*);
int contcenters(Points*);
void copycenters(Points*, Points*, long*, long);
void* localSearchSub(void*);
void localSearch(Points*, long, long, long*);
void outcenterIDs(Points*, long*, char*);
void streamCluster(PStream*, long, long, int, long, long, char*);
float pgain(long, Points*, float, long int*, int, bool*, int*, bool*, bool, double*, double*, double*, double*, double*, double*);
void allocDevMem(int, int, int);
void allocHostMem(int, int, int);
void freeDevMem();
void freeHostMem();
#endif
|
b4c3baa36519f891b76a601b7b89588ca417a932.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
#define BSZ (16)
void checkErrors(const char *label) {
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess) {
char *e = (char *) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = hipGetLastError();
if (err != hipSuccess) {
char *e = (char *) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time() {
struct timeval tim;
hipDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec + (tim.tv_usec / 1000000.0);
}
// GPU kernel
__global__ void copy_array(float *u, float *u_prev, int N) {
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
u_prev[I] = u[I];
}
__global__ void update(float *u, float *u_prev, int N, float h, float dt, float alpha) {
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
__shared__ float u_prev_sh[BSZ][BSZ];
u_prev_sh[i][j] = u_prev[I];
__syncthreads();
bool bound_check = ((I > N) && (I < N * N - 1 - N) && (I % N != 0) && (I % N != N - 1));
bool block_check = ((i > 0) && (i < BSZ - 1) && (j > 0) && (j < BSZ - 1));
// if not on block boundary do
if (block_check) {
u[I] = u_prev_sh[i][j] + alpha * dt / h / h * (u_prev_sh[i + 1][j] + u_prev_sh[i - 1][j] + u_prev_sh[i][j + 1] +
u_prev_sh[i][j - 1] - 4 * u_prev_sh[i][j]);
}
// if not on boundary
else if (bound_check)
//if (bound_check)
{
u[I] = u_prev[I] +
alpha * dt / (h * h) * (u_prev[I + 1] + u_prev[I - 1] + u_prev[I + N] + u_prev[I - N] - 4 * u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
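// The stencil above is the explicit (forward-time, centred-space) scheme for the
// 2-D heat equation: u_new = u + alpha*dt/h^2 * (u_E + u_W + u_N + u_S - 4*u).
// Explicit diffusion is only stable when roughly dt <= h^2/(4*alpha); with
// h = 3.5/(N-1), dt = 1e-5 and alpha = 0.645 as set in main() below, that bound
// is respected for N up to roughly 690.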
int main(int argc, char *argv[]) {
// Allocate in CPU
int N = atoi(argv[1]);
hipSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax - xmin) / (N - 1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = (int) ceil(time / dt);
int I;
float *x = new float[N * N];
float *y = new float[N * N];
float *u = new float[N * N];
float *u_prev = new float[N * N];
// Generate mesh and initial condition
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
x[I] = xmin + h * i;
y[I] = ymin + h * j;
u[I] = 0.0f;
if ((i == 0) || (j == 0)) { u[I] = 200.0f; }
}
}
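// Initial/boundary condition: the two edges i == 0 and j == 0 start (and, since
// the kernel never touches boundary cells, stay) at 200 while the interior starts
// at 0, so heat diffuses in from those two hot edges.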
// Allocate in GPU
float *u_d, *u_prev_d;
hipMalloc((void **) &u_d, N * N * sizeof(float));
hipMalloc((void **) &u_prev_d, N * N * sizeof(float));
// Copy to GPU
hipMemcpy(u_d, u, N * N * sizeof(float), hipMemcpyHostToDevice);
// Loop
dim3 dimGrid(int((N - 0.5) / BSZ) + 1, int((N - 0.5) / BSZ) + 1);
dim3 dimBlock(BSZ, BSZ);
double start = get_time();
for (int t = 0; t < steps; t++) {
hipLaunchKernelGGL(( copy_array) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N);
hipLaunchKernelGGL(( update) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, h, dt, alpha);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
std::cout << elapsed << std::endl;
// Copy result back to host
hipMemcpy(u, u_d, N * N * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream temperature("temperature_shared.txt");
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
// std::cout<<u[I]<<"\t";
temperature << x[I] << "\t" << y[I] << "\t" << u[I] << std::endl;
}
temperature << "\n";
//std::cout<<std::endl;
}
temperature.close();
// Free device
hipFree(u_d);
hipFree(u_prev_d);
}
| b4c3baa36519f891b76a601b7b89588ca417a932.cu | #include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
#define BSZ (16)
void checkErrors(const char *label) {
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess) {
char *e = (char *) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess) {
char *e = (char *) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time() {
struct timeval tim;
cudaThreadSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec + (tim.tv_usec / 1000000.0);
}
// GPU kernel
__global__ void copy_array(float *u, float *u_prev, int N) {
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
u_prev[I] = u[I];
}
__global__ void update(float *u, float *u_prev, int N, float h, float dt, float alpha) {
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
__shared__ float u_prev_sh[BSZ][BSZ];
u_prev_sh[i][j] = u_prev[I];
__syncthreads();
bool bound_check = ((I > N) && (I < N * N - 1 - N) && (I % N != 0) && (I % N != N - 1));
bool block_check = ((i > 0) && (i < BSZ - 1) && (j > 0) && (j < BSZ - 1));
// if not on block boundary do
if (block_check) {
u[I] = u_prev_sh[i][j] + alpha * dt / h / h * (u_prev_sh[i + 1][j] + u_prev_sh[i - 1][j] + u_prev_sh[i][j + 1] +
u_prev_sh[i][j - 1] - 4 * u_prev_sh[i][j]);
}
// if not on boundary
else if (bound_check)
//if (bound_check)
{
u[I] = u_prev[I] +
alpha * dt / (h * h) * (u_prev[I + 1] + u_prev[I - 1] + u_prev[I + N] + u_prev[I - N] - 4 * u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
int main(int argc, char *argv[]) {
// Allocate in CPU
int N = atoi(argv[1]);
cudaSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax - xmin) / (N - 1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = (int) ceil(time / dt);
int I;
float *x = new float[N * N];
float *y = new float[N * N];
float *u = new float[N * N];
float *u_prev = new float[N * N];
// Generate mesh and initial condition
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
x[I] = xmin + h * i;
y[I] = ymin + h * j;
u[I] = 0.0f;
if ((i == 0) || (j == 0)) { u[I] = 200.0f; }
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
cudaMalloc((void **) &u_d, N * N * sizeof(float));
cudaMalloc((void **) &u_prev_d, N * N * sizeof(float));
// Copy to GPU
cudaMemcpy(u_d, u, N * N * sizeof(float), cudaMemcpyHostToDevice);
// Loop
dim3 dimGrid(int((N - 0.5) / BSZ) + 1, int((N - 0.5) / BSZ) + 1);
dim3 dimBlock(BSZ, BSZ);
double start = get_time();
for (int t = 0; t < steps; t++) {
copy_array <<<dimGrid, dimBlock>>>(u_d, u_prev_d, N);
update <<<dimGrid, dimBlock>>>(u_d, u_prev_d, N, h, dt, alpha);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
std::cout << elapsed << std::endl;
// Copy result back to host
cudaMemcpy(u, u_d, N * N * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream temperature("temperature_shared.txt");
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
// std::cout<<u[I]<<"\t";
temperature << x[I] << "\t" << y[I] << "\t" << u[I] << std::endl;
}
temperature << "\n";
//std::cout<<std::endl;
}
temperature.close();
// Free device
cudaFree(u_d);
cudaFree(u_prev_d);
}
|
accf83b0f13b03bc0d546721b9b9d84207225466.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void vecSum_GPU3(const double *a, double *res, const unsigned long n);
__global__ void vecSum_betterGPU(const double *a, double *gpuRes, const unsigned long n, int offset);
void random_double(double *a, int n);
int main(int argc, char* argv[]) {
double n = atof(argv[1]);
printf("n = %f\n", n);
hipDeviceProp_t devProperties;
hipGetDeviceProperties(&devProperties, 0);
unsigned int ThreadsPerBlock = devProperties.maxThreadsPerBlock;
unsigned int BlocksPerGrid = (n + (devProperties.maxThreadsPerBlock-1)) / devProperties.maxThreadsPerBlock;
int size = n * sizeof(double);
int resSize = n * sizeof(double) / 1024;
double *vec = (double *) malloc(size);
double *pinnedVec;
hipHostMalloc(&pinnedVec, size);
double *gpuVec;
double *gpuRes; // gpuRes type is (maybe) not correct
// double cpuRes = 0;
double *res = (double *) malloc(resSize);
// srand(time(NULL)); // get your seed!
// random_double(vec, n);
// memcpy(pinnedVec, vec, size);
for (int i = 0; i < (int) n; i++) {
pinnedVec[i] = 1;
}
// CPU Time
// clock_t cputime = clock();
// cpuRes = 0;
// for (int i = 0; i < n; i++) {
// cpuRes += vec[i];
// }
// printf("CPU Result: %f\n", cpuRes);
// printf("Time: %f s\n", ((double)clock() - cputime) / CLOCKS_PER_SEC);
// GPU Time
clock_t gputime = clock();
hipMalloc(&gpuVec, size);
hipMalloc(&gpuRes, size);
hipMemcpy(gpuVec, pinnedVec, size, hipMemcpyHostToDevice);
int numOfRuns = ceil(n / (1024*1024));
printf("numOfRuns = %d\n", numOfRuns);
for (int i = 0; i < numOfRuns; i++) {
hipLaunchKernelGGL(( vecSum_betterGPU), dim3(BlocksPerGrid), dim3(ThreadsPerBlock), 0, 0, (double *)(gpuVec + i*(1024*1024)), gpuRes, n, i);
hipMemcpy(gpuVec, gpuRes, size, hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( vecSum_betterGPU), dim3(BlocksPerGrid), dim3(ThreadsPerBlock), 0, 0, gpuVec, gpuRes, n, i);
}
hipMemcpy(res, gpuRes, sizeof(double), hipMemcpyDeviceToHost);
printf("GPU Result: %f\n", res[0]);
printf("Time: %f s\n", ((double) clock() - gputime) / CLOCKS_PER_SEC);
hipFree(gpuVec);
hipFree(gpuRes);
hipHostFree(pinnedVec); // pinned host memory must be released with hipHostFree, not hipFree
}
__global__ void vecSum_betterGPU(const double *a, double *gpuRes, const unsigned long n, int offset) {
// dynamic shared memory size
__shared__ double tmp[1024];
// copy in shared memory
unsigned long i = blockIdx.x * blockDim.x + threadIdx.x + offset*1024*1024;
if (i < n) {
tmp[threadIdx.x] = a[i];
} else {
tmp[threadIdx.x] = 0.0; // pad inactive threads so the reduction below reads defined values
}
__syncthreads();
// do reduction in shared memory
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
tmp[threadIdx.x] += tmp[threadIdx.x + s];
}
__syncthreads();
}
// last thread writes result
if (threadIdx.x == 0) {
gpuRes[blockIdx.x + offset * 1024] = tmp[0];
}
}
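// The s >>= 1 loop above is a shared-memory tree reduction: the shared array is
// sized for 1024 threads, so the stride runs 512, 256, ..., 1 and tmp[0] ends up
// holding the block's total. As a toy illustration with an 8-thread block and
// tmp = {1,2,3,4,5,6,7,8}, the three rounds give {6,8,10,12,...}, then
// {16,20,...}, then 36 in tmp[0].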
__global__ void vecSum_GPU3(const double *a, double *res, const unsigned long n) {
// dynamic shared memory size
__shared__ double tmp[1024];
// copy in shared memory
unsigned long i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
tmp[threadIdx.x] = a[i];
} else {
tmp[threadIdx.x] = 0.0; // pad inactive threads so the reduction below reads defined values
}
__syncthreads();
// do reduction in shared memory
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
tmp[threadIdx.x] += tmp[threadIdx.x + s];
}
__syncthreads();
}
// last thread writes result
if (threadIdx.x == 0) {
res[blockIdx.x] = tmp[0];
}
}
void random_double(double *a, int n) {
for (int i = 0; i < n; i++)
a[i] = rand() % 10000; // random number between 0 and 9999
}
| accf83b0f13b03bc0d546721b9b9d84207225466.cu |
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void vecSum_GPU3(const double *a, double *res, const unsigned long n);
__global__ void vecSum_betterGPU(const double *a, double *gpuRes, const unsigned long n, int offset);
void random_double(double *a, int n);
int main(int argc, char* argv[]) {
double n = atof(argv[1]);
printf("n = %f\n", n);
cudaDeviceProp devProperties;
cudaGetDeviceProperties(&devProperties, 0);
unsigned int ThreadsPerBlock = devProperties.maxThreadsPerBlock;
unsigned int BlocksPerGrid = (n + (devProperties.maxThreadsPerBlock-1)) / devProperties.maxThreadsPerBlock;
int size = n * sizeof(double);
int resSize = n * sizeof(double) / 1024;
double *vec = (double *) malloc(size);
double *pinnedVec;
cudaMallocHost(&pinnedVec, size);
double *gpuVec;
double *gpuRes; // gpuRes type is (maybe) not correct
// double cpuRes = 0;
double *res = (double *) malloc(resSize);
// srand(time(NULL)); // get your seed!
// random_double(vec, n);
// memcpy(pinnedVec, vec, size);
for (int i = 0; i < (int) n; i++) {
pinnedVec[i] = 1;
}
// CPU Time
// clock_t cputime = clock();
// cpuRes = 0;
// for (int i = 0; i < n; i++) {
// cpuRes += vec[i];
// }
// printf("CPU Result: %f\n", cpuRes);
// printf("Time: %f s\n", ((double)clock() - cputime) / CLOCKS_PER_SEC);
// GPU Time
clock_t gputime = clock();
cudaMalloc(&gpuVec, size);
cudaMalloc(&gpuRes, size);
cudaMemcpy(gpuVec, pinnedVec, size, cudaMemcpyHostToDevice);
int numOfRuns = ceil(n / (1024*1024));
printf("numOfRuns = %d\n", numOfRuns);
for (int i = 0; i < numOfRuns; i++) {
vecSum_betterGPU<<<BlocksPerGrid, ThreadsPerBlock>>>((double *)(gpuVec + i*(1024*1024)), gpuRes, n, i);
cudaMemcpy(gpuVec, gpuRes, size, cudaMemcpyDeviceToDevice);
vecSum_betterGPU<<<BlocksPerGrid, ThreadsPerBlock>>>(gpuVec, gpuRes, n, i);
}
cudaMemcpy(res, gpuRes, sizeof(double), cudaMemcpyDeviceToHost);
printf("GPU Result: %f\n", res[0]);
printf("Time: %f s\n", ((double) clock() - gputime) / CLOCKS_PER_SEC);
cudaFree(gpuVec);
cudaFree(gpuRes);
cudaFreeHost(pinnedVec); // pinned host memory must be released with cudaFreeHost, not cudaFree
}
__global__ void vecSum_betterGPU(const double *a, double *gpuRes, const unsigned long n, int offset) {
// dynamic shared memory size
__shared__ double tmp[1024];
// copy in shared memory
unsigned long i = blockIdx.x * blockDim.x + threadIdx.x + offset*1024*1024;
if (i < n) {
tmp[threadIdx.x] = a[i];
} else {
tmp[threadIdx.x] = 0.0; // pad inactive threads so the reduction below reads defined values
}
__syncthreads();
// do reduction in shared memory
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
tmp[threadIdx.x] += tmp[threadIdx.x + s];
}
__syncthreads();
}
// last thread writes result
if (threadIdx.x == 0) {
gpuRes[blockIdx.x + offset * 1024] = tmp[0];
}
}
__global__ void vecSum_GPU3(const double *a, double *res, const unsigned long n) {
// dynamic shared memory size
__shared__ double tmp[1024];
// copy in shared memory
unsigned long i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
tmp[threadIdx.x] = a[i];
} else {
tmp[threadIdx.x] = 0.0; // pad inactive threads so the reduction below reads defined values
}
__syncthreads();
// do reduction in shared memory
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
tmp[threadIdx.x] += tmp[threadIdx.x + s];
}
__syncthreads();
}
// last thread writes result
if (threadIdx.x == 0) {
res[blockIdx.x] = tmp[0];
}
}
void random_double(double *a, int n) {
for (int i = 0; i < n; i++)
a[i] = rand() % 10000; // random number between 0 and 9999
}
|
0fd9447559b90d8f625c2fa1a93f6fad2c6fd581.hip | // !!! This is a file automatically generated by hipify!!!
#define LIMIT -999
#define BLOCK_SIZE 16
#define MAX_SEQ_LEN 2100
#define MAX_SEQ_NUM 1024
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "needle.h"
#include "needle_cpu.h"
#include <pthread.h>
#define LENGTH 1600
//#define TRACEBACK
// includes, kernels
#include "needle_cpu.c"
//#include "needle_kernel_dynamic.cu"
#include "needle_kernel_diagonal.cu"
inline void cudaCheckError(int line, hipError_t ce)
{
if (ce != hipSuccess) {
printf("Error: line %d %s\n", line, hipGetErrorString(ce));
exit(1);
}
}
// HACK Huan's hack
// this is not the updated validation code
int validation(int *score_matrix_cpu, int *score_matrix, unsigned int length)
{
unsigned int i = 0;
while (i!=length) {
if ( (score_matrix_cpu[i]) == (score_matrix[i] >> 2) ) {
++i;
continue;
}
else {
printf("i = %d, expected %d, got %d.\n",i, score_matrix_cpu[i], score_matrix[i] >> 2);
return 0;
}
}
return 1;
}
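// The GPU kernel stores each DP cell as (score << 2) | traceback, with the two low
// bits holding one of the TRACE_* directions. That is why the comparison above
// shifts the GPU value right by 2 before matching it against the plain CPU score,
// and why the optional traceback code further down masks with 0x03.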
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <pair number> <penalty> \n", argv[0]);
fprintf(stderr, "\t<pair number> - times of comparison\n");
fprintf(stderr, "\t<penalty> - penalty(negative integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
double time, end_time;
int pair_num;
short penalty;
char sequence_set1[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0}, sequence_set2[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0};
unsigned int pos1[MAX_SEQ_NUM] = {0}, pos2[MAX_SEQ_NUM] = {0}, pos_matrix[MAX_SEQ_NUM] = {0};
int *score_matrix;
int *trace_matrix;
int *score_matrix_cpu;
int *trace_matrix_cpu;
char *d_sequence_set1, *d_sequence_set2;
unsigned int *d_pos1, *d_pos2, *d_pos_matrix;
int *d_score_matrix;
int seq1_len, seq2_len;
if (argc == 3)
{
pair_num = atoi(argv[1]);
penalty = atoi(argv[2]);
if (pair_num>MAX_SEQ_NUM) {
fprintf(stderr, "\t<number of pairs> - number of pairs, must be less than %d\n",MAX_SEQ_NUM);
exit(1);
}
}
else {
usage(argc, argv);
}
// first API
time = gettime();
cudaCheckError( __LINE__, hipSetDevice(0) );
end_time = gettime();
fprintf(stdout,"First API,%lf\n",end_time-time);
time = end_time;
// Get input data
srand ( 7 );
pos_matrix[0] = pos1[0] = pos2[0] = 0;
for (int i=0; i<pair_num; ++i) {
//please define your own sequence 1
seq1_len = LENGTH; //64+rand() % 20;
//printf("Seq1 length: %d\n", seq1_len);
for (int j=0; j<seq1_len; ++j)
sequence_set1[ pos1[i] + j ] = rand() % 20 + 'A';
pos1[i+1] = pos1[i] + seq1_len;
//please define your own sequence 2.
seq2_len = LENGTH;//64+rand() % 20;
//printf("Seq2 length: %d\n\n", seq2_len);
for (int j=0; j<seq2_len; ++j)
sequence_set2[ pos2[i] +j ] = rand() % 20 + 'A';
pos2[i+1] = pos2[i] + seq2_len;
//printf("Matrix size increase: %d\n", (seq1_len+1) * (seq2_len+1));
pos_matrix[i+1] = pos_matrix[i] + (seq1_len+1) * (seq2_len+1);
}
score_matrix = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
score_matrix_cpu = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
#ifdef _LP64
printf ("Running on a 64-bit platform!\n");
#else
#endif
/*
short M = -1;
printf("M: "BYTETOBINARYPATTERN" "BYTETOBINARYPATTERN"\n",
BYTETOBINARY(M>>8), BYTETOBINARY(M));
*/
printf ("Allocating %dMB of memory... \
(sizeof int=%d bytes, sizeof short=%d bytes)\n",
pos_matrix[pair_num]*sizeof(int)/1024/1024,
sizeof(int),
sizeof(short)
);
// printf("Start Needleman-Wunsch\n");
cudaCheckError( __LINE__, hipMalloc( (void**)&d_sequence_set1, sizeof(char)*pos1[pair_num] ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_sequence_set2, sizeof(char)*pos2[pair_num] ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_score_matrix, sizeof(int)*pos_matrix[pair_num]) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos1, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos2, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, hipMalloc( (void**)&d_pos_matrix, sizeof(unsigned int)*(pair_num+1) ) );
time = gettime();
needleman_cpu(sequence_set1, sequence_set2, pos1, pos2, score_matrix_cpu, pos_matrix, pair_num, penalty);
// CPU phases
end_time = gettime();
fprintf(stdout,"CPU time,%lf\n",end_time-time);
time = end_time;
// Memcpy to device
cudaCheckError( __LINE__,
hipMemcpy( d_sequence_set1, sequence_set1, sizeof(char)*pos1[pair_num], hipMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
hipMemcpy( d_sequence_set2, sequence_set2, sizeof(char)*pos2[pair_num], hipMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
hipMemcpy( d_pos1, pos1, sizeof(unsigned int)*(pair_num+1), hipMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
hipMemcpy( d_pos2, pos2, sizeof(unsigned int)*(pair_num+1), hipMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
hipMemcpy( d_pos_matrix, pos_matrix, sizeof(unsigned int)*(pair_num+1), hipMemcpyHostToDevice )
);
//end_time = gettime();
//fprintf(stdout,"Memcpy to device,%lf\n",end_time-time);
//time = end_time;
hipLaunchKernelGGL(( needleman_cuda_diagonal), dim3(pair_num),dim3(512), 0, 0, d_sequence_set1, d_sequence_set2,
d_pos1, d_pos2,
d_score_matrix, d_pos_matrix,
pair_num, penalty);
cudaCheckError( __LINE__, hipDeviceSynchronize() );
//end_time = gettime();
//fprintf(stdout,"kernel,%lf\n",end_time-time);
//time = end_time;
// Memcpy to host
cudaCheckError( __LINE__, hipMemcpy( score_matrix, d_score_matrix, sizeof(int)*pos_matrix[pair_num], hipMemcpyDeviceToHost ) );
end_time = gettime();
//fprintf(stdout,"Memcpy to host,%lf\n",end_time-time);
fprintf(stdout,"GPU time, %lf\n",end_time-time);
time = end_time;
if ( validation(score_matrix_cpu, score_matrix, pos_matrix[pair_num]) )
printf("Validation: PASS\n");
else
printf("Validation: FAIL\n");
#ifdef TRACEBACK
printf("Here comes the result of the first pair...\n");
int seq1_begin = pos1[0];
int seq1_end = pos1[1];
int seq2_begin = pos2[0];
int seq2_end = pos2[1];
int *current_matrix = score_matrix + pos_matrix[0];
printf("1st seq len = %d =\n%.*s\n", seq1_end - seq1_begin, seq1_end - seq1_begin, sequence_set1 + seq1_begin);
printf("2nd seq len = %d =\n%.*s\n", seq2_end - seq2_begin, seq2_end - seq2_begin, sequence_set2 + seq2_begin);
printf("traceback = \n");
bool done = false;
int current_pos = ((seq1_end - seq1_begin)+1) * ((seq2_end - seq2_begin)+1) -1; // start at the last cell of the matrix
// Fix LENGTH, so that it takes more than just square... this is not important
for (int i = 0; i < LENGTH + 1; i++) {
for (int j = 0; j < LENGTH + 1; j++) {
int dir = current_matrix[i*(LENGTH+1)+j];
if ((dir & 0x03) == TRACE_UL) {
printf("\\");
} else if ((dir & 0x03) == TRACE_U) {
printf("^");
} else if ((dir & 0x03) == TRACE_L) {
printf("<");
} else {
printf("-");
}
}
printf("\n");
}
// Fix LENGTH, so that it takes more than just square... this is not important
for (int i = 0; i < LENGTH + 1; i++) {
for (int j = 0; j < LENGTH + 1; j++) {
int dir = current_matrix[i*(LENGTH+1)+j] >> 2;
printf("%4d ", dir);
}
printf("\n");
}
printf("Actual traceback:\n");
while (!done) {
int dir = current_matrix[current_pos];
// printf("current_pos = %d, dir = %x, score = %d\n", current_pos, dir & 0x03, dir >> 2);
if ((dir & 0x03) == TRACE_UL) {
printf("\\");
current_pos = current_pos - (seq1_end - seq1_begin + 1) - 1;
} else if ((dir & 0x03) == TRACE_U) {
printf("^");
current_pos = current_pos - (seq1_end - seq1_begin + 1);
} else if ((dir & 0x03) == TRACE_L) {
printf("<");
current_pos = current_pos - 1;
} else {
printf("seems to have reached the origin...");
done = true;
}
}
printf("traceback done!\n");
#endif
// fclose(fpo);
hipFree(d_sequence_set1);
hipFree(d_sequence_set2);
hipFree(d_pos1);
hipFree(d_pos2);
hipFree(d_pos_matrix);
hipFree(d_score_matrix);
free(score_matrix);
}
| 0fd9447559b90d8f625c2fa1a93f6fad2c6fd581.cu | #define LIMIT -999
#define BLOCK_SIZE 16
#define MAX_SEQ_LEN 2100
#define MAX_SEQ_NUM 1024
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
#include "needle.h"
#include "needle_cpu.h"
#include <pthread.h>
#define LENGTH 1600
//#define TRACEBACK
// includes, kernels
#include "needle_cpu.c"
//#include "needle_kernel_dynamic.cu"
#include "needle_kernel_diagonal.cu"
inline void cudaCheckError(int line, cudaError_t ce)
{
if (ce != cudaSuccess) {
printf("Error: line %d %s\n", line, cudaGetErrorString(ce));
exit(1);
}
}
// HACK Huan's hack
// this is not the updated validation code
int validation(int *score_matrix_cpu, int *score_matrix, unsigned int length)
{
unsigned int i = 0;
while (i!=length) {
if ( (score_matrix_cpu[i]) == (score_matrix[i] >> 2) ) {
++i;
continue;
}
else {
printf("i = %d, expected %d, got %d.\n",i, score_matrix_cpu[i], score_matrix[i] >> 2);
return 0;
}
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <pair number> <penalty> \n", argv[0]);
fprintf(stderr, "\t<pair number> - times of comparison\n");
fprintf(stderr, "\t<penalty> - penalty(negative integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
double time, end_time;
int pair_num;
short penalty;
char sequence_set1[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0}, sequence_set2[MAX_SEQ_LEN*MAX_SEQ_NUM] = {0};
unsigned int pos1[MAX_SEQ_NUM] = {0}, pos2[MAX_SEQ_NUM] = {0}, pos_matrix[MAX_SEQ_NUM] = {0};
int *score_matrix;
int *trace_matrix;
int *score_matrix_cpu;
int *trace_matrix_cpu;
char *d_sequence_set1, *d_sequence_set2;
unsigned int *d_pos1, *d_pos2, *d_pos_matrix;
int *d_score_matrix;
int seq1_len, seq2_len;
if (argc == 3)
{
pair_num = atoi(argv[1]);
penalty = atoi(argv[2]);
if (pair_num>MAX_SEQ_NUM) {
fprintf(stderr, "\t<number of pairs> - number of pairs, must be less than %d\n",MAX_SEQ_NUM);
exit(1);
}
}
else {
usage(argc, argv);
}
// first API
time = gettime();
cudaCheckError( __LINE__, cudaSetDevice(0) );
end_time = gettime();
fprintf(stdout,"First API,%lf\n",end_time-time);
time = end_time;
// Get input data
srand ( 7 );
pos_matrix[0] = pos1[0] = pos2[0] = 0;
for (int i=0; i<pair_num; ++i) {
//please define your own sequence 1
seq1_len = LENGTH; //64+rand() % 20;
//printf("Seq1 length: %d\n", seq1_len);
for (int j=0; j<seq1_len; ++j)
sequence_set1[ pos1[i] + j ] = rand() % 20 + 'A';
pos1[i+1] = pos1[i] + seq1_len;
//please define your own sequence 2.
seq2_len = LENGTH;//64+rand() % 20;
//printf("Seq2 length: %d\n\n", seq2_len);
for (int j=0; j<seq2_len; ++j)
sequence_set2[ pos2[i] +j ] = rand() % 20 + 'A';
pos2[i+1] = pos2[i] + seq2_len;
//printf("Matrix size increase: %d\n", (seq1_len+1) * (seq2_len+1));
pos_matrix[i+1] = pos_matrix[i] + (seq1_len+1) * (seq2_len+1);
}
score_matrix = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
score_matrix_cpu = (int *)malloc( pos_matrix[pair_num]*sizeof(int));
#ifdef _LP64
printf ("Running on a 64-bit platform!\n");
#else
#endif
/*
short M = -1;
printf("M: "BYTETOBINARYPATTERN" "BYTETOBINARYPATTERN"\n",
BYTETOBINARY(M>>8), BYTETOBINARY(M));
*/
printf ("Allocating %dMB of memory... \
(sizeof int=%d bytes, sizeof short=%d bytes)\n",
pos_matrix[pair_num]*sizeof(int)/1024/1024,
sizeof(int),
sizeof(short)
);
// printf("Start Needleman-Wunsch\n");
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_sequence_set1, sizeof(char)*pos1[pair_num] ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_sequence_set2, sizeof(char)*pos2[pair_num] ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_score_matrix, sizeof(int)*pos_matrix[pair_num]) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos1, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos2, sizeof(unsigned int)*(pair_num+1) ) );
cudaCheckError( __LINE__, cudaMalloc( (void**)&d_pos_matrix, sizeof(unsigned int)*(pair_num+1) ) );
time = gettime();
needleman_cpu(sequence_set1, sequence_set2, pos1, pos2, score_matrix_cpu, pos_matrix, pair_num, penalty);
// CPU phases
end_time = gettime();
fprintf(stdout,"CPU time,%lf\n",end_time-time);
time = end_time;
// Memcpy to device
cudaCheckError( __LINE__,
cudaMemcpy( d_sequence_set1, sequence_set1, sizeof(char)*pos1[pair_num], cudaMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
cudaMemcpy( d_sequence_set2, sequence_set2, sizeof(char)*pos2[pair_num], cudaMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
cudaMemcpy( d_pos1, pos1, sizeof(unsigned int)*(pair_num+1), cudaMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
cudaMemcpy( d_pos2, pos2, sizeof(unsigned int)*(pair_num+1), cudaMemcpyHostToDevice )
);
cudaCheckError( __LINE__,
cudaMemcpy( d_pos_matrix, pos_matrix, sizeof(unsigned int)*(pair_num+1), cudaMemcpyHostToDevice )
);
//end_time = gettime();
//fprintf(stdout,"Memcpy to device,%lf\n",end_time-time);
//time = end_time;
needleman_cuda_diagonal<<<pair_num,512>>>(d_sequence_set1, d_sequence_set2,
d_pos1, d_pos2,
d_score_matrix, d_pos_matrix,
pair_num, penalty);
cudaCheckError( __LINE__, cudaDeviceSynchronize() );
//end_time = gettime();
//fprintf(stdout,"kernel,%lf\n",end_time-time);
//time = end_time;
// Memcpy to host
cudaCheckError( __LINE__, cudaMemcpy( score_matrix, d_score_matrix, sizeof(int)*pos_matrix[pair_num], cudaMemcpyDeviceToHost ) );
end_time = gettime();
//fprintf(stdout,"Memcpy to host,%lf\n",end_time-time);
fprintf(stdout,"GPU time, %lf\n",end_time-time);
time = end_time;
if ( validation(score_matrix_cpu, score_matrix, pos_matrix[pair_num]) )
printf("Validation: PASS\n");
else
printf("Validation: FAIL\n");
#ifdef TRACEBACK
printf("Here comes the result of the first pair...\n");
int seq1_begin = pos1[0];
int seq1_end = pos1[1];
int seq2_begin = pos2[0];
int seq2_end = pos2[1];
int *current_matrix = score_matrix + pos_matrix[0];
printf("1st seq len = %d =\n%.*s\n", seq1_end - seq1_begin, seq1_end - seq1_begin, sequence_set1 + seq1_begin);
printf("2nd seq len = %d =\n%.*s\n", seq2_end - seq2_begin, seq2_end - seq2_begin, sequence_set2 + seq2_begin);
printf("traceback = \n");
bool done = false;
int current_pos = ((seq1_end - seq1_begin)+1) * ((seq2_end - seq2_begin)+1) -1; // start at the last cell of the matrix
// Fix LENGTH, so that it takes more than just square... this is not important
for (int i = 0; i < LENGTH + 1; i++) {
for (int j = 0; j < LENGTH + 1; j++) {
int dir = current_matrix[i*(LENGTH+1)+j];
if ((dir & 0x03) == TRACE_UL) {
printf("\\");
} else if ((dir & 0x03) == TRACE_U) {
printf("^");
} else if ((dir & 0x03) == TRACE_L) {
printf("<");
} else {
printf("-");
}
}
printf("\n");
}
// Fix LENGTH, so that it takes more than just square... this is not important
for (int i = 0; i < LENGTH + 1; i++) {
for (int j = 0; j < LENGTH + 1; j++) {
int dir = current_matrix[i*(LENGTH+1)+j] >> 2;
printf("%4d ", dir);
}
printf("\n");
}
printf("Actual traceback:\n");
while (!done) {
int dir = current_matrix[current_pos];
// printf("current_pos = %d, dir = %x, score = %d\n", current_pos, dir & 0x03, dir >> 2);
if ((dir & 0x03) == TRACE_UL) {
printf("\\");
current_pos = current_pos - (seq1_end - seq1_begin + 1) - 1;
} else if ((dir & 0x03) == TRACE_U) {
printf("^");
current_pos = current_pos - (seq1_end - seq1_begin + 1);
} else if ((dir & 0x03) == TRACE_L) {
printf("<");
current_pos = current_pos - 1;
} else {
printf("seems to have reached the origin...");
done = true;
}
}
printf("traceback done!\n");
#endif
// fclose(fpo);
cudaFree(d_sequence_set1);
cudaFree(d_sequence_set2);
cudaFree(d_pos1);
cudaFree(d_pos2);
cudaFree(d_pos_matrix);
cudaFree(d_score_matrix);
free(score_matrix);
}
|
c17cd3ed6be6b5ed1b70ff8f8e310219a5a39e10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlacpy_conj.cu normal z -> d, Tue Feb 9 16:05:28 2016
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
// copy & conjugate a single vector of length n.
// TODO: this was modeled on the old dswap routine. Update to new dlacpy code for 2D matrix?
__global__ void dlacpy_conj_kernel(
int n,
double *A1, int lda1,
double *A2, int lda2 )
{
int x = threadIdx.x + blockDim.x*blockIdx.x;
int offset1 = x*lda1;
int offset2 = x*lda2;
if ( x < n )
{
A2[offset2] = MAGMA_D_CONJ( A1[offset1] );
}
}
extern "C" void
magmablas_dlacpy_conj_q(
magma_int_t n,
magmaDouble_ptr dA1, magma_int_t lda1,
magmaDouble_ptr dA2, magma_int_t lda2,
magma_queue_t queue )
{
dim3 threads( BLOCK_SIZE );
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( dlacpy_conj_kernel), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , n, dA1, lda1, dA2, lda2 );
}
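// Usage sketch: the kernel copies and conjugates element x of a strided vector,
// reading A1[x*lda1] and writing A2[x*lda2]. With lda1 = lda2 = 1 that is a plain
// contiguous vector copy; with lda1 equal to the leading dimension of a
// column-major matrix it can gather one row of that matrix into a contiguous
// buffer, one thread per element (for this real-valued variant the "conjugate"
// is a no-op).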
| c17cd3ed6be6b5ed1b70ff8f8e310219a5a39e10.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlacpy_conj.cu normal z -> d, Tue Feb 9 16:05:28 2016
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
// copy & conjugate a single vector of length n.
// TODO: this was modeled on the old dswap routine. Update to new dlacpy code for 2D matrix?
__global__ void dlacpy_conj_kernel(
int n,
double *A1, int lda1,
double *A2, int lda2 )
{
int x = threadIdx.x + blockDim.x*blockIdx.x;
int offset1 = x*lda1;
int offset2 = x*lda2;
if ( x < n )
{
A2[offset2] = MAGMA_D_CONJ( A1[offset1] );
}
}
extern "C" void
magmablas_dlacpy_conj_q(
magma_int_t n,
magmaDouble_ptr dA1, magma_int_t lda1,
magmaDouble_ptr dA2, magma_int_t lda2,
magma_queue_t queue )
{
dim3 threads( BLOCK_SIZE );
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dlacpy_conj_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>( n, dA1, lda1, dA2, lda2 );
}
|
e5142139ac1386b4dd38b9be2da379626e6d65c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHBasis.h"
#include "THHBasisForward.cuh"
#include "THHBasisBackward.cuh"
template<typename T>
__global__ void linearBasisForwardKernel(TensorInfo<T> basis, TensorInfo<int64_t>weightIndex,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_FORWARD_KERNEL(1, basis, weightIndex, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::linear(v, kMod))
}
template<typename T>
__global__ void quadraticBasisForwardKernel(TensorInfo<T> basis, TensorInfo<int64_t>weightIndex,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_FORWARD_KERNEL(2, basis, weightIndex, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::quadratic(v, kMod))
}
template<typename T>
__global__ void cubicBasisForwardKernel(TensorInfo<T> basis, TensorInfo<int64_t>weightIndex,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_FORWARD_KERNEL(3, basis, weightIndex, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::cubic(v, kMod))
}
template<typename T>
__global__ void linearBasisBackwardKernel(TensorInfo<T> self, TensorInfo<T>gradBasis,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_BACKWARD_KERNEL(1, self, gradBasis, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::linear(v, kMod),
BasisBackward<T>::linear(v, kMod))
}
template<typename T>
__global__ void quadraticBasisBackwardKernel(TensorInfo<T> self, TensorInfo<T>gradBasis,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_BACKWARD_KERNEL(2, self, gradBasis, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::quadratic(v, kMod),
BasisBackward<T>::quadratic(v, kMod))
}
template<typename T>
__global__ void cubicBasisBackwardKernel(TensorInfo<T> self, TensorInfo<T>gradBasis,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_BACKWARD_KERNEL(3, self, gradBasis, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::cubic(v, kMod),
BasisBackward<T>::cubic(v, kMod))
}
#include "generic/THCBasis.cu"
#include "THH/THHGenerateFloatTypes.h"
| e5142139ac1386b4dd38b9be2da379626e6d65c7.cu | #include "THCBasis.h"
#include "THCBasisForward.cuh"
#include "THCBasisBackward.cuh"
template<typename T>
__global__ void linearBasisForwardKernel(TensorInfo<T> basis, TensorInfo<int64_t>weightIndex,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_FORWARD_KERNEL(1, basis, weightIndex, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::linear(v, kMod))
}
template<typename T>
__global__ void quadraticBasisForwardKernel(TensorInfo<T> basis, TensorInfo<int64_t>weightIndex,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_FORWARD_KERNEL(2, basis, weightIndex, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::quadratic(v, kMod))
}
template<typename T>
__global__ void cubicBasisForwardKernel(TensorInfo<T> basis, TensorInfo<int64_t>weightIndex,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_FORWARD_KERNEL(3, basis, weightIndex, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::cubic(v, kMod))
}
template<typename T>
__global__ void linearBasisBackwardKernel(TensorInfo<T> self, TensorInfo<T>gradBasis,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_BACKWARD_KERNEL(1, self, gradBasis, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::linear(v, kMod),
BasisBackward<T>::linear(v, kMod))
}
template<typename T>
__global__ void quadraticBasisBackwardKernel(TensorInfo<T> self, TensorInfo<T>gradBasis,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_BACKWARD_KERNEL(2, self, gradBasis, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::quadratic(v, kMod),
BasisBackward<T>::quadratic(v, kMod))
}
template<typename T>
__global__ void cubicBasisBackwardKernel(TensorInfo<T> self, TensorInfo<T>gradBasis,
TensorInfo<T> pseudo, int64_t *kernelSize,
uint8_t *isOpenSpline, ptrdiff_t n) {
THC_TENSOR_BASIS_BACKWARD_KERNEL(3, self, gradBasis, pseudo, kernelSize, isOpenSpline, n,
BasisForward<T>::cubic(v, kMod),
BasisBackward<T>::cubic(v, kMod))
}
#include "generic/THCBasis.cu"
#include "THC/THCGenerateFloatTypes.h"
|
b734fc07b14d1b7f9d7c2d57901fe22df6500a72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
Author: MaikuZ
*/
#include <iostream>
#include <map>
#include <vector>
#include <string>
#include "particle_swarm.h"
#include "simulated_annealing.h"
#include "functions.h"
#include "function_list.h"
#include "random_gen.h"
void write_argument_list(std::vector<float> &arg) {
std::cout << "(";
for (int i = 0;i < arg.size() - 1;i++)
std::cout << arg[i] << ", ";
std::cout << arg.back() << ")";
}
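// TEST_ALL_FUNCTIONS_PS / _SA are X-macro bodies: FOR_EACH_TEST_FUNCTION
// (presumably provided by function_list.h) expands them once per
// (NAME, DIM, OPT_VAL, BOUNDS) tuple, so every benchmark function is pushed
// through both optimizers without being listed by hand.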
#define TEST_ALL_FUNCTIONS_PS(NAME, DIM, OPT_VAL, BOUNDS) \
{\
std::cout << "Testing function: '" << #NAME << "', dimensions: " << DIM << ", minimum value: " << OPT_VAL << std::endl; \
std::vector<float> best_arg(DIM);\
ParticleSwarm<NAME, DIM> finder;\
finder.findMinimum(best_arg, 1024 * 1024); \
std::cout << "ParticleSwarm found the minimum at: ";\
write_argument_list(best_arg);\
std::cout << std::endl << "The value is: " << NAME(best_arg.data()) << std::endl;\
}\
#define TEST_ALL_FUNCTIONS_SA(NAME, DIM, OPT_VAL, BOUNDS) \
{\
std::cout << "Testing function: '" << #NAME << "', dimensions: " << DIM << ", minimum value: " << OPT_VAL << std::endl; \
std::vector<float> best_arg(DIM);\
SimulatedAnnealing<NAME, temperature, DIM> finder;\
finder.setDomainSpace(BOUNDS());\
finder.findMinimum(best_arg, 1024 * 1024); \
std::cout << "Simulated Annealing found the minimum at: ";\
write_argument_list(best_arg);\
std::cout << std::endl << "The value is: " << NAME(best_arg.data()) << std::endl;\
}\
__global__ void cuda_test(bool *working, int device) {
printf("Running on device: %d\n", device);
*working = true;
}
bool is_cuda_working(int device_no) {
bool *d_check;
gpuErrchk(hipMalloc(&d_check, sizeof(bool) * 1));
hipLaunchKernelGGL(( cuda_test), dim3(1),dim3(1), 0, 0, d_check, device_no);
bool is_working;
gpuErrchk(hipMemcpy(&is_working, d_check, sizeof(bool) * 1, hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_check));
return is_working;
}
int main() {
srand(time(0));
int device_no = 3;
gpuErrchk(hipSetDevice(device_no));
if(!is_cuda_working(device_no)) {
std::cout << "cuda is not working on device: " << device_no << std::endl;
return 0;
}
FOR_EACH_TEST_FUNCTION(TEST_ALL_FUNCTIONS_PS)
FOR_EACH_TEST_FUNCTION(TEST_ALL_FUNCTIONS_SA)
std::cout << "All functions were tested!" << std::endl;
return 0;
} | b734fc07b14d1b7f9d7c2d57901fe22df6500a72.cu | /*!
Author: MaikuZ
*/
#include <iostream>
#include <map>
#include <vector>
#include <string>
#include "particle_swarm.h"
#include "simulated_annealing.h"
#include "functions.h"
#include "function_list.h"
#include "random_gen.h"
void write_argument_list(std::vector<float> &arg) {
std::cout << "(";
for (int i = 0;i < arg.size() - 1;i++)
std::cout << arg[i] << ", ";
std::cout << arg.back() << ")";
}
#define TEST_ALL_FUNCTIONS_PS(NAME, DIM, OPT_VAL, BOUNDS) \
{\
std::cout << "Testing function: '" << #NAME << "', dimensions: " << DIM << ", minimum value: " << OPT_VAL << std::endl; \
std::vector<float> best_arg(DIM);\
ParticleSwarm<NAME, DIM> finder;\
finder.findMinimum(best_arg, 1024 * 1024); \
std::cout << "ParticleSwarm found the minimum at: ";\
write_argument_list(best_arg);\
std::cout << std::endl << "The value is: " << NAME(best_arg.data()) << std::endl;\
}\
#define TEST_ALL_FUNCTIONS_SA(NAME, DIM, OPT_VAL, BOUNDS) \
{\
std::cout << "Testing function: '" << #NAME << "', dimensions: " << DIM << ", minimum value: " << OPT_VAL << std::endl; \
std::vector<float> best_arg(DIM);\
SimulatedAnnealing<NAME, temperature, DIM> finder;\
finder.setDomainSpace(BOUNDS());\
finder.findMinimum(best_arg, 1024 * 1024); \
std::cout << "Simulated Annealing found the minimum at: ";\
write_argument_list(best_arg);\
std::cout << std::endl << "The value is: " << NAME(best_arg.data()) << std::endl;\
}\
__global__ void cuda_test(bool *working, int device) {
printf("Running on device: %d\n", device);
*working = true;
}
bool is_cuda_working(int device_no) {
bool *d_check;
gpuErrchk(cudaMalloc(&d_check, sizeof(bool) * 1));
cuda_test<<<1,1>>>(d_check, device_no);
bool is_working;
gpuErrchk(cudaMemcpy(&is_working, d_check, sizeof(bool) * 1, cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_check));
return is_working;
}
int main() {
srand(time(0));
int device_no = 3;
gpuErrchk(cudaSetDevice(device_no));
if(!is_cuda_working(device_no)) {
std::cout << "cuda is not working on device: " << device_no << std::endl;
return 0;
}
FOR_EACH_TEST_FUNCTION(TEST_ALL_FUNCTIONS_PS)
FOR_EACH_TEST_FUNCTION(TEST_ALL_FUNCTIONS_SA)
std::cout << "All functions were tested!" << std::endl;
return 0;
} |
2b1d1c1f0442a0d163ffd440b138810776bd274b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Saxy_device(float* x, float* y, float* d, float xb, float yb, int n)
{
int i = threadIdx.x;
if (i < n)
d[i] = (x[i] - xb) * (y[i] - yb);
} | 2b1d1c1f0442a0d163ffd440b138810776bd274b.cu | #include "includes.h"
__global__ void Saxy_device(float* x, float* y, float* d, float xb, float yb, int n)
{
int i = threadIdx.x;
if (i < n)
d[i] = (x[i] - xb) * (y[i] - yb);
} |
93838c7147955933b3eb9286c9c93031d4318e23.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <omp.h>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <vector>
#include <algorithm>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <iostream>
#include <cstdlib>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <set>
#include <unistd.h>
#include <sys/time.h>
#include <map>
#include <assert.h>
#include "ellipsoid_query_gpu.h"
#include "cuda_utils.h"
int row = 0;
int col = 0;
using namespace std;
const int max_iter = 1000;
/* ---------------------------------------------------------------- */
//
// the following functions come from here:
//
// https://people.sc.fsu.edu/~jburkardt/cpp_src/jacobi_eigenvalue/jacobi_eigenvalue.cpp
//
// attributed to j. burkardt, FSU
// they are unmodified except to add __host__ __device__ decorations
//
//****************************************************************************80
__device__ void r8mat_diag_get_vector(int n, float a[], float v[])
{
int i;
for ( i = 0; i < n; i++ )
{
v[i] = a[i+i*n];
}
return;
}
//****************************************************************************80
__device__ void r8mat_identity(int n, float a[])
{
int i;
int j;
int k;
k = 0;
for ( j = 0; j < n; j++ )
{
for ( i = 0; i < n; i++ )
{
if ( i == j )
{
a[k] = 1.0;
}
else
{
a[k] = 0.0;
}
k = k + 1;
}
}
return;
}
//****************************************************************************80
__device__ void jacobi_eigenvalue(int n, float a[], int it_max, float v[], float d[], int &it_num, int &rot_num)
{
float *bw;
float c;
float g;
float gapq;
float h;
int i;
int j;
int k;
int l;
int m;
int p;
int q;
float s;
float t;
float tau;
float term;
float termp;
float termq;
float theta;
float thresh;
float w;
float *zw;
r8mat_identity ( n, v );
r8mat_diag_get_vector ( n, a, d );
bw = new float[n];
zw = new float[n];
for ( i = 0; i < n; i++ )
{
bw[i] = d[i];
zw[i] = 0.0;
}
it_num = 0;
rot_num = 0;
while ( it_num < it_max )
{
it_num = it_num + 1;
//
// The convergence threshold is based on the size of the elements in
// the strict upper triangle of the matrix.
//
thresh = 0.0;
for ( j = 0; j < n; j++ )
{
for ( i = 0; i < j; i++ )
{
thresh = thresh + a[i+j*n] * a[i+j*n];
}
}
thresh = sqrt ( thresh ) / ( float ) ( 4 * n );
if ( thresh == 0.0 )
{
break;
}
for ( p = 0; p < n; p++ )
{
for ( q = p + 1; q < n; q++ )
{
gapq = 10.0 * fabs ( a[p+q*n] );
termp = gapq + fabs ( d[p] );
termq = gapq + fabs ( d[q] );
//
// Annihilate tiny offdiagonal elements.
//
if ( 4 < it_num &&
termp == fabs ( d[p] ) &&
termq == fabs ( d[q] ) )
{
a[p+q*n] = 0.0;
}
//
// Otherwise, apply a rotation.
//
else if ( thresh <= fabs ( a[p+q*n] ) )
{
h = d[q] - d[p];
term = fabs ( h ) + gapq;
if ( term == fabs ( h ) )
{
t = a[p+q*n] / h;
}
else
{
theta = 0.5 * h / a[p+q*n];
t = 1.0 / ( fabs ( theta ) + sqrt ( 1.0 + theta * theta ) );
if ( theta < 0.0 )
{
t = - t;
}
}
c = 1.0 / sqrt ( 1.0 + t * t );
s = t * c;
tau = s / ( 1.0 + c );
h = t * a[p+q*n];
//
// Accumulate corrections to diagonal elements.
//
zw[p] = zw[p] - h;
zw[q] = zw[q] + h;
d[p] = d[p] - h;
d[q] = d[q] + h;
a[p+q*n] = 0.0;
//
// Rotate, using information from the upper triangle of A only.
//
for ( j = 0; j < p; j++ )
{
g = a[j+p*n];
h = a[j+q*n];
a[j+p*n] = g - s * ( h + g * tau );
a[j+q*n] = h + s * ( g - h * tau );
}
for ( j = p + 1; j < q; j++ )
{
g = a[p+j*n];
h = a[j+q*n];
a[p+j*n] = g - s * ( h + g * tau );
a[j+q*n] = h + s * ( g - h * tau );
}
for ( j = q + 1; j < n; j++ )
{
g = a[p+j*n];
h = a[q+j*n];
a[p+j*n] = g - s * ( h + g * tau );
a[q+j*n] = h + s * ( g - h * tau );
}
//
// Accumulate information in the eigenvector matrix.
//
for ( j = 0; j < n; j++ )
{
g = v[j+p*n];
h = v[j+q*n];
v[j+p*n] = g - s * ( h + g * tau );
v[j+q*n] = h + s * ( g - h * tau );
}
rot_num = rot_num + 1;
}
}
}
for ( i = 0; i < n; i++ )
{
bw[i] = bw[i] + zw[i];
d[i] = bw[i];
zw[i] = 0.0;
}
}
//
// Restore upper triangle of input matrix.
//
for ( j = 0; j < n; j++ )
{
for ( i = 0; i < j; i++ )
{
a[i+j*n] = a[j+i*n];
}
}
//
// Ascending sort the eigenvalues and eigenvectors.
//
for ( k = 0; k < n - 1; k++ )
{
m = k;
for ( l = k + 1; l < n; l++ )
{
if ( d[l] < d[m] )
{
m = l;
}
}
if ( m != k )
{
t = d[m];
d[m] = d[k];
d[k] = t;
for ( i = 0; i < n; i++ )
{
w = v[i+m*n];
v[i+m*n] = v[i+k*n];
v[i+k*n] = w;
}
}
}
delete [] bw;
delete [] zw;
return;
}
void initialize_matrix(int mat_id, int n, float *mat, float *v){
for (int i = 0; i < n*n; i++) *(v+(mat_id*n*n)+i) = mat[i];
}
// end of FSU code
/* ---------------------------------------------------------------- */
//Ellipsoid querying
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ellipsoid_point_kernel(int b, int n, int m, float e1, float e2, float e3,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
const int *__restrict__ fps_idx,
int *__restrict__ idx,
int *__restrict__ ingroup_pts_cnt,
float *__restrict__ ingroup_out,
float *__restrict__ ingroup_cva,
float *__restrict__ v,
float *__restrict__ d){
int batch_index = blockIdx.x;
int c = 3;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
fps_idx += batch_index * m;
idx += m * nsample * batch_index;
ingroup_pts_cnt += m*batch_index;
ingroup_out += m*nsample*3*batch_index;
ingroup_cva += m*3*3*batch_index;
v += m*3*3*batch_index;
d += m*3*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
//squares of axis-lengths
float aa = e1 * e1;
float bb = e2 * e2;
float cc = e3 * e3;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = fps_idx[j];
}
int cnt = 0;
for (int k = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
//first round of ellipsoid querying
float d2 = max(sqrtf(((new_x - x) * (new_x - x)/aa) + ((new_y - y) * (new_y - y)/bb) +
((new_z - z) * (new_z - z)/cc)),1e-20f);
if (d2 <= 1 && d2 > 0) {
idx[j * nsample + cnt] = k;
++cnt;
}
}
ingroup_pts_cnt[j] = cnt;
//grouping of ellipsoid-queried points
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
ingroup_out[j*nsample*c+k*c+l] = xyz[ii*c+l];
}
}
//from the grouped points pick unique points
float *Matrix=(float *)malloc(sizeof(float)*ingroup_pts_cnt[j]*c);
float *tMatrix=(float *)malloc(sizeof(float)*ingroup_pts_cnt[j]*c);
int flag=0;
if(ingroup_pts_cnt[j]>=3){
for(int k=0;k<ingroup_pts_cnt[j];k++){
int ii = idx[j*nsample+k];
Matrix[0+3*k] = xyz[ii*c+0];
Matrix[1+3*k] = xyz[ii*c+1];
Matrix[2+3*k] = xyz[ii*c+2];
if(xyz[ii*c+0]==0 && xyz[ii*c+1]==0 && xyz[ii*c+2]==0){
flag=1;
}
}
if(flag!=1){
//find mean of unique points
float means[3];
float d2;
means[0]=means[1]=means[2]=0.0;
for (int up=0;up<ingroup_pts_cnt[j];up++){
means[0]+=Matrix[up*c+0];
means[1]+=Matrix[up*c+1];
means[2]+=Matrix[up*c+2];
}
means[0]=means[0]/ingroup_pts_cnt[j];
means[1]=means[1]/ingroup_pts_cnt[j];
means[2]=means[2]/ingroup_pts_cnt[j];
//distance between mean of unique points and the centroid point
d2=sqrtf((means[0]-new_x)*(means[0]-new_x)+(means[1]-new_y)*(means[1]-new_y)+(means[2]-new_z)*(means[2]-new_z));
//covariance adjustment
if (d2 >= e1/4.0){
//if more points are on one side of the centroid
for(int up=0;up<ingroup_pts_cnt[j];up++){
//subtract centroid from the points
Matrix[c*up]=Matrix[c*up]-new_x;
Matrix[c*up+1]=Matrix[c*up+1]-new_y;
Matrix[c*up+2]=Matrix[c*up+2]-new_z;
}
}else{
for(int up=0;up<ingroup_pts_cnt[j];up++){
// subtract mean from the points
Matrix[c*up]=Matrix[c*up]-means[0];
Matrix[c*up+1]=Matrix[c*up+1]-means[1];
Matrix[c*up+2]=Matrix[c*up+2]-means[2];
}
}
//transpose points matrix
for(int tpt=0;tpt<c;tpt++){
for(int tup=0;tup<ingroup_pts_cnt[j];tup++){
tMatrix[tpt+c*tup]=Matrix[tpt+c*tup];
}
}
//calculate covariance matrix
float *covm=(float *)malloc(sizeof(float)*c*c);
for(int t3=0;t3<c;t3++){
for(int tn=0;tn<c;tn++){
covm[tn+t3*c] = 0.0;
for(int n3=0;n3<ingroup_pts_cnt[j];n3++){
covm[tn+t3*c]+=tMatrix[t3+c*n3]*Matrix[tn+n3*c];
}
ingroup_cva[j*c*c+tn+t3*c]=covm[tn+t3*c]/(ingroup_pts_cnt[j]-1);
}
}
free(covm);
}
}
free(Matrix);
free(tMatrix);
int it_num;
int rot_num;
if((ingroup_pts_cnt[j]>=3)){
//Eigendecomposition
jacobi_eigenvalue(c, ingroup_cva+(j*c*c), max_iter, v+(j*c*c), d+(j*c), it_num, rot_num);
cnt = ingroup_pts_cnt[j];
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ellipsoid
float x1=xyz[k*3+0];
float y1=xyz[k*3+1];
float z1=xyz[k*3+2];
float spoint[3];
float rspoint[3];
spoint[0]=x1-new_x;
spoint[1]=y1-new_y;
spoint[2]=z1-new_z;
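// Project the shifted point onto the covariance eigenvectors (columns of v;
// jacobi_eigenvalue sorts eigenvalues ascending, so the last column, v[6..8],
// is the principal direction of the neighborhood). The principal component
// lands in rspoint[0] and is therefore tested against the major axis e1.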
//rotating input points
rspoint[0] = ((*(v+(c*c*j)+6)))*spoint[0]+((*(v+(c*c*j)+7)))*spoint[1]+((*(v+(c*c*j)+8)))*spoint[2];
rspoint[1] = ((*(v+(c*c*j)+3)))*spoint[0]+((*(v+(c*c*j)+4)))*spoint[1]+((*(v+(c*c*j)+5)))*spoint[2];
rspoint[2] = ((*(v+(c*c*j)+0)))*spoint[0]+((*(v+(c*c*j)+1)))*spoint[1]+((*(v+(c*c*j)+2)))*spoint[2];
float xx = rspoint[0];
float yy = rspoint[1];
float zz = rspoint[2];
//second querying - reoriented ellipsoid
float d3=max(sqrtf((xx*xx/aa)+(yy*yy/bb)+(zz*zz/cc)),1e-20f);
//union of both query points
if (d3<=1) {
int kflag=0;
for(int kk=0;kk<nsample;kk++){
if (idx[j*nsample+kk]==k){
kflag=1;
break;
}
}
if (kflag!=1){
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
ingroup_pts_cnt[j] = cnt;
}
}
}
void query_ellipsoid_point_kernel_wrapper(int b, int n, int m, float e1, float e2, float e3,
int nsample, const float *new_xyz,
const float *xyz, const int *fps_idx, int *idx,
int *ingroup_pts_cnt, float *ingroup_out,
float *ingroup_cva, float *v, float *d,
hipStream_t stream) {
hipError_t err;
hipLaunchKernelGGL(( query_ellipsoid_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream,
b, n, m, e1, e2, e3, nsample, new_xyz, xyz, fps_idx, idx, ingroup_pts_cnt, ingroup_out, ingroup_cva, v, d);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed inside ellipsoid wrapper: %s\n", hipGetErrorString(err));
exit(-1);
}
}
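// Buffer shapes expected by the wrapper above, as implied by the kernel's
// pointer arithmetic (all row-major):
// new_xyz (b, m, 3), xyz (b, n, 3), fps_idx (b, m), idx (b, m, nsample),
// ingroup_pts_cnt (b, m), ingroup_out (b, m, nsample, 3),
// ingroup_cva and v (b, m, 3, 3), d (b, m, 3).
// The kernel also allocates with device-side malloc()/free(), so for large
// groups the device heap may need to be raised beforehand, e.g. via
// hipDeviceSetLimit(hipLimitMallocHeapSize, ...).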
| 93838c7147955933b3eb9286c9c93031d4318e23.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <omp.h>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <vector>
#include <algorithm>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <iostream>
#include <cstdlib>
#include <device_launch_parameters.h>
#include "helper_cuda.h"
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <set>
#include <unistd.h>
#include <sys/time.h>
#include <map>
#include <assert.h>
#include "ellipsoid_query_gpu.h"
#include "cuda_utils.h"
int row = 0;
int col = 0;
using namespace std;
const int max_iter = 1000;
/* ---------------------------------------------------------------- */
//
// the following functions come from here:
//
// https://people.sc.fsu.edu/~jburkardt/cpp_src/jacobi_eigenvalue/jacobi_eigenvalue.cpp
//
// attributed to j. burkardt, FSU
// they are unmodified except to add __host__ __device__ decorations
//
//****************************************************************************80
__device__ void r8mat_diag_get_vector(int n, float a[], float v[])
{
int i;
for ( i = 0; i < n; i++ )
{
v[i] = a[i+i*n];
}
return;
}
//****************************************************************************80
__device__ void r8mat_identity(int n, float a[])
{
int i;
int j;
int k;
k = 0;
for ( j = 0; j < n; j++ )
{
for ( i = 0; i < n; i++ )
{
if ( i == j )
{
a[k] = 1.0;
}
else
{
a[k] = 0.0;
}
k = k + 1;
}
}
return;
}
//****************************************************************************80
__device__ void jacobi_eigenvalue(int n, float a[], int it_max, float v[], float d[], int &it_num, int &rot_num)
{
float *bw;
float c;
float g;
float gapq;
float h;
int i;
int j;
int k;
int l;
int m;
int p;
int q;
float s;
float t;
float tau;
float term;
float termp;
float termq;
float theta;
float thresh;
float w;
float *zw;
r8mat_identity ( n, v );
r8mat_diag_get_vector ( n, a, d );
bw = new float[n];
zw = new float[n];
for ( i = 0; i < n; i++ )
{
bw[i] = d[i];
zw[i] = 0.0;
}
it_num = 0;
rot_num = 0;
while ( it_num < it_max )
{
it_num = it_num + 1;
//
// The convergence threshold is based on the size of the elements in
// the strict upper triangle of the matrix.
//
thresh = 0.0;
for ( j = 0; j < n; j++ )
{
for ( i = 0; i < j; i++ )
{
thresh = thresh + a[i+j*n] * a[i+j*n];
}
}
thresh = sqrt ( thresh ) / ( float ) ( 4 * n );
if ( thresh == 0.0 )
{
break;
}
for ( p = 0; p < n; p++ )
{
for ( q = p + 1; q < n; q++ )
{
gapq = 10.0 * fabs ( a[p+q*n] );
termp = gapq + fabs ( d[p] );
termq = gapq + fabs ( d[q] );
//
// Annihilate tiny offdiagonal elements.
//
if ( 4 < it_num &&
termp == fabs ( d[p] ) &&
termq == fabs ( d[q] ) )
{
a[p+q*n] = 0.0;
}
//
// Otherwise, apply a rotation.
//
else if ( thresh <= fabs ( a[p+q*n] ) )
{
h = d[q] - d[p];
term = fabs ( h ) + gapq;
if ( term == fabs ( h ) )
{
t = a[p+q*n] / h;
}
else
{
theta = 0.5 * h / a[p+q*n];
t = 1.0 / ( fabs ( theta ) + sqrt ( 1.0 + theta * theta ) );
if ( theta < 0.0 )
{
t = - t;
}
}
c = 1.0 / sqrt ( 1.0 + t * t );
s = t * c;
tau = s / ( 1.0 + c );
h = t * a[p+q*n];
//
// Accumulate corrections to diagonal elements.
//
zw[p] = zw[p] - h;
zw[q] = zw[q] + h;
d[p] = d[p] - h;
d[q] = d[q] + h;
a[p+q*n] = 0.0;
//
// Rotate, using information from the upper triangle of A only.
//
for ( j = 0; j < p; j++ )
{
g = a[j+p*n];
h = a[j+q*n];
a[j+p*n] = g - s * ( h + g * tau );
a[j+q*n] = h + s * ( g - h * tau );
}
for ( j = p + 1; j < q; j++ )
{
g = a[p+j*n];
h = a[j+q*n];
a[p+j*n] = g - s * ( h + g * tau );
a[j+q*n] = h + s * ( g - h * tau );
}
for ( j = q + 1; j < n; j++ )
{
g = a[p+j*n];
h = a[q+j*n];
a[p+j*n] = g - s * ( h + g * tau );
a[q+j*n] = h + s * ( g - h * tau );
}
//
// Accumulate information in the eigenvector matrix.
//
for ( j = 0; j < n; j++ )
{
g = v[j+p*n];
h = v[j+q*n];
v[j+p*n] = g - s * ( h + g * tau );
v[j+q*n] = h + s * ( g - h * tau );
}
rot_num = rot_num + 1;
}
}
}
for ( i = 0; i < n; i++ )
{
bw[i] = bw[i] + zw[i];
d[i] = bw[i];
zw[i] = 0.0;
}
}
//
// Restore upper triangle of input matrix.
//
for ( j = 0; j < n; j++ )
{
for ( i = 0; i < j; i++ )
{
a[i+j*n] = a[j+i*n];
}
}
//
// Ascending sort the eigenvalues and eigenvectors.
//
for ( k = 0; k < n - 1; k++ )
{
m = k;
for ( l = k + 1; l < n; l++ )
{
if ( d[l] < d[m] )
{
m = l;
}
}
if ( m != k )
{
t = d[m];
d[m] = d[k];
d[k] = t;
for ( i = 0; i < n; i++ )
{
w = v[i+m*n];
v[i+m*n] = v[i+k*n];
v[i+k*n] = w;
}
}
}
delete [] bw;
delete [] zw;
return;
}
void initialize_matrix(int mat_id, int n, float *mat, float *v){
for (int i = 0; i < n*n; i++) *(v+(mat_id*n*n)+i) = mat[i];
}
// end of FSU code
/* ---------------------------------------------------------------- */
//Ellipsoid querying
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ellipsoid_point_kernel(int b, int n, int m, float e1, float e2, float e3,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
const int *__restrict__ fps_idx,
int *__restrict__ idx,
int *__restrict__ ingroup_pts_cnt,
float *__restrict__ ingroup_out,
float *__restrict__ ingroup_cva,
float *__restrict__ v,
float *__restrict__ d){
int batch_index = blockIdx.x;
int c = 3;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
fps_idx += batch_index * m;
idx += m * nsample * batch_index;
ingroup_pts_cnt += m*batch_index;
ingroup_out += m*nsample*3*batch_index;
ingroup_cva += m*3*3*batch_index;
v += m*3*3*batch_index;
d += m*3*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
//squares of axis-lengths
float aa = e1 * e1;
float bb = e2 * e2;
float cc = e3 * e3;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = fps_idx[j];
}
int cnt = 0;
for (int k = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
//first round of ellipsoid querying
float d2 = max(sqrtf(((new_x - x) * (new_x - x)/aa) + ((new_y - y) * (new_y - y)/bb) +
((new_z - z) * (new_z - z)/cc)),1e-20f);
if (d2 <= 1 && d2 > 0) {
idx[j * nsample + cnt] = k;
++cnt;
}
}
ingroup_pts_cnt[j] = cnt;
//grouping of ellipsoid-queried points
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
ingroup_out[j*nsample*c+k*c+l] = xyz[ii*c+l];
}
}
//from the grouped points pick unique points
float *Matrix=(float *)malloc(sizeof(float)*ingroup_pts_cnt[j]*c);
float *tMatrix=(float *)malloc(sizeof(float)*ingroup_pts_cnt[j]*c);
int flag=0;
if(ingroup_pts_cnt[j]>=3){
for(int k=0;k<ingroup_pts_cnt[j];k++){
int ii = idx[j*nsample+k];
Matrix[0+3*k] = xyz[ii*c+0];
Matrix[1+3*k] = xyz[ii*c+1];
Matrix[2+3*k] = xyz[ii*c+2];
if(xyz[ii*c+0]==0 && xyz[ii*c+1]==0 && xyz[ii*c+2]==0){
flag=1;
}
}
if(flag!=1){
//find mean of unique points
float means[3];
float d2;
means[0]=means[1]=means[2]=0.0;
for (int up=0;up<ingroup_pts_cnt[j];up++){
means[0]+=Matrix[up*c+0];
means[1]+=Matrix[up*c+1];
means[2]+=Matrix[up*c+2];
}
means[0]=means[0]/ingroup_pts_cnt[j];
means[1]=means[1]/ingroup_pts_cnt[j];
means[2]=means[2]/ingroup_pts_cnt[j];
//distance between mean of unique points and the centroid point
d2=sqrtf((means[0]-new_x)*(means[0]-new_x)+(means[1]-new_y)*(means[1]-new_y)+(means[2]-new_z)*(means[2]-new_z));
//covariance adjustment
if (d2 >= e1/4.0){
//if more points are on one side of the centroid
for(int up=0;up<ingroup_pts_cnt[j];up++){
//subtract centroid from the points
Matrix[c*up]=Matrix[c*up]-new_x;
Matrix[c*up+1]=Matrix[c*up+1]-new_y;
Matrix[c*up+2]=Matrix[c*up+2]-new_z;
}
}else{
for(int up=0;up<ingroup_pts_cnt[j];up++){
// subtract mean from the points
Matrix[c*up]=Matrix[c*up]-means[0];
Matrix[c*up+1]=Matrix[c*up+1]-means[1];
Matrix[c*up+2]=Matrix[c*up+2]-means[2];
}
}
//transpose points matrix
for(int tpt=0;tpt<c;tpt++){
for(int tup=0;tup<ingroup_pts_cnt[j];tup++){
tMatrix[tpt+c*tup]=Matrix[tpt+c*tup];
}
}
//calculate covariance matrix
float *covm=(float *)malloc(sizeof(float)*c*c);
for(int t3=0;t3<c;t3++){
for(int tn=0;tn<c;tn++){
covm[tn+t3*c] = 0.0;
for(int n3=0;n3<ingroup_pts_cnt[j];n3++){
covm[tn+t3*c]+=tMatrix[t3+c*n3]*Matrix[tn+n3*c];
}
ingroup_cva[j*c*c+tn+t3*c]=covm[tn+t3*c]/(ingroup_pts_cnt[j]-1);
}
}
free(covm);
}
}
free(Matrix);
free(tMatrix);
int it_num;
int rot_num;
if((ingroup_pts_cnt[j]>=3)){
//Eigendecomposition
jacobi_eigenvalue(c, ingroup_cva+(j*c*c), max_iter, v+(j*c*c), d+(j*c), it_num, rot_num);
cnt = ingroup_pts_cnt[j];
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ellipsoid
float x1=xyz[k*3+0];
float y1=xyz[k*3+1];
float z1=xyz[k*3+2];
float spoint[3];
float rspoint[3];
spoint[0]=x1-new_x;
spoint[1]=y1-new_y;
spoint[2]=z1-new_z;
//rotating input points
rspoint[0] = ((*(v+(c*c*j)+6)))*spoint[0]+((*(v+(c*c*j)+7)))*spoint[1]+((*(v+(c*c*j)+8)))*spoint[2];
rspoint[1] = ((*(v+(c*c*j)+3)))*spoint[0]+((*(v+(c*c*j)+4)))*spoint[1]+((*(v+(c*c*j)+5)))*spoint[2];
rspoint[2] = ((*(v+(c*c*j)+0)))*spoint[0]+((*(v+(c*c*j)+1)))*spoint[1]+((*(v+(c*c*j)+2)))*spoint[2];
float xx = rspoint[0];
float yy = rspoint[1];
float zz = rspoint[2];
//second querying - reoriented ellipsoid
float d3=max(sqrtf((xx*xx/aa)+(yy*yy/bb)+(zz*zz/cc)),1e-20f);
//union of both query points
if (d3<=1) {
int kflag=0;
for(int kk=0;kk<nsample;kk++){
if (idx[j*nsample+kk]==k){
kflag=1;
break;
}
}
if (kflag!=1){
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
ingroup_pts_cnt[j] = cnt;
}
}
}
void query_ellipsoid_point_kernel_wrapper(int b, int n, int m, float e1, float e2, float e3,
int nsample, const float *new_xyz,
const float *xyz, const int *fps_idx, int *idx,
int *ingroup_pts_cnt, float *ingroup_out,
float *ingroup_cva, float *v, float *d,
cudaStream_t stream) {
cudaError_t err;
query_ellipsoid_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, e1, e2, e3, nsample, new_xyz, xyz, fps_idx, idx, ingroup_pts_cnt, ingroup_out, ingroup_cva, v, d);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed inside ellipsoid wrapper: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
a65a31a8100ec2298fd0fe23cf1d1855058f3754.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "slice_unpool_layer_cuda_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// -------- Max Unpooling Forward kernel
// slice_idx: num_batch, num_points
// data: num_batch, channels, num_slice , 1
// output: num_batch, channels, num_points, 1
__global__ void slice_unpool_forward_gpu(const int nthreads, float * data, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads)
{
// get the index of point
const int c = index / num_points;
const int i = index % num_points;
// UnPooling
int n;
for (n = 0; n < num_batch; n++) {
// get slice index
int s_idx = slice_idx[ n*num_points + i ];
// get output index, [n, c, i, 0]
int output_idx = n * channels * num_points + c * num_points + i;
// get input index, [n,, c, cls_idx, 0]
int input_index = n * channels * num_slice + c * num_slice + s_idx;
output[ output_idx ] = data[input_index];
}
}
}
// -------- Max Unpooling Backward kernel
__global__ void slice_unpool_backward_gpu(const int nthreads, float * top, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads)
{
// get the index of point
const int c = index / num_batch;
const int n = index % num_batch;
int i;
for (i = 0; i < num_points; i++) {
int s_idx = slice_idx[ n*num_points + i ];
int top_index = n * channels * num_points + c * num_points + i; //top[n, c, i, 0]
int bottom_index = n * channels * num_slice + c * num_slice + s_idx ; // output[n, c, cls_idx, 0]
output[bottom_index] += top[top_index];
}
}
}
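// Parallelization note: the backward kernel assigns one thread per
// (channel, batch) pair and loops over all points, so the "+=" accumulation
// into output[n, c, s_idx] never races (each slice plane is written by a
// single thread), at the cost of launching far fewer threads than the
// forward pass.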
// -------- Unpooling Forward launcher
int slice_unpool_forward_gpu_laucher(float * data, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output, hipStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int kBlocks = (num_points * channels + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipError_t err;
hipLaunchKernelGGL(( slice_unpool_forward_gpu), dim3(kBlocks), dim3(kThreadsPerBlock), 0, stream, num_points * channels, data, slice_idx, num_slice, num_batch, channels, num_points, output);
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
exit( -1 );
}
return 1;
}
// -------- Unpooling Backward launcher
int slice_unpool_backward_gpu_laucher(float * top, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output, hipStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int kBlocks = (num_batch * channels + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipError_t err;
hipLaunchKernelGGL(( slice_unpool_backward_gpu), dim3(kBlocks), dim3(kThreadsPerBlock), 0, stream, num_batch * channels, top, slice_idx, num_slice, num_batch, channels, num_points, output);
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
exit( -1 );
}
return 1;
}
#ifdef __cplusplus
}
#endif
| a65a31a8100ec2298fd0fe23cf1d1855058f3754.cu | #ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "slice_unpool_layer_cuda_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// -------- Max Unpooling Forward kernel
// slice_idx: num_batch, num_points
// data: num_batch, channels, num_slice , 1
// output: num_batch, channels, num_points, 1
__global__ void slice_unpool_forward_gpu(const int nthreads, float * data, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads)
{
// get the index of point
const int c = index / num_points;
const int i = index % num_points;
// UnPooling
int n;
for (n = 0; n < num_batch; n++) {
// get slice index
int s_idx = slice_idx[ n*num_points + i ];
// get output index, [n, c, i, 0]
int output_idx = n * channels * num_points + c * num_points + i;
// get input index, [n,, c, cls_idx, 0]
int input_index = n * channels * num_slice + c * num_slice + s_idx;
output[ output_idx ] = data[input_index];
}
}
}
// -------- Max Unpooling Backward kernel
__global__ void slice_unpool_backward_gpu(const int nthreads, float * top, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads)
{
// get the index of point
const int c = index / num_batch;
const int n = index % num_batch;
int i;
for (i = 0; i < num_points; i++) {
int s_idx = slice_idx[ n*num_points + i ];
int top_index = n * channels * num_points + c * num_points + i; //top[n, c, i, 0]
int bottom_index = n * channels * num_slice + c * num_slice + s_idx ; // output[n, c, cls_idx, 0]
output[bottom_index] += top[top_index];
}
}
}
// -------- Unpooling Forward launcher
int slice_unpool_forward_gpu_laucher(float * data, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output, cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int kBlocks = (num_points * channels + kThreadsPerBlock - 1) / kThreadsPerBlock;
cudaError_t err;
slice_unpool_forward_gpu<<< kBlocks, kThreadsPerBlock, 0, stream>>>(num_points * channels, data, slice_idx, num_slice, num_batch, channels, num_points, output);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
// -------- Unpooling Backward launcher
int slice_unpool_backward_gpu_laucher(float * top, int * slice_idx, const int num_slice, const int num_batch, const int channels, const int num_points, float * output, cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int kBlocks = (num_batch * channels + kThreadsPerBlock - 1) / kThreadsPerBlock;
cudaError_t err;
slice_unpool_backward_gpu<<< kBlocks, kThreadsPerBlock, 0, stream>>>(num_batch * channels, top, slice_idx, num_slice, num_batch, channels, num_points, output);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
#ifdef __cplusplus
}
#endif
|
307dc2284a8b95464b39f1044efc1faf2dfd8bb3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/binaryop.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
namespace { // anonymous namespace
template <typename _TargetT>
struct unary_cast {
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element);
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
// Convert source tick counts into target tick counts without blindly truncating them
// by dividing the respective duration time periods (which may not work for time before
// UNIX epoch)
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element.time_since_epoch())};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_duration<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{static_cast<typename TargetT::rep>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element.time_since_epoch())};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_duration<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element.count());
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element)};
}
};
template <typename _SourceT, typename _TargetT>
struct fixed_point_unary_cast {
numeric::scale_type scale;
using FixedPointT = std::conditional_t<cudf::is_fixed_point<_SourceT>(), _SourceT, _TargetT>;
using DeviceT = device_storage_type_t<FixedPointT>;
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_fixed_point<_SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(DeviceT const element)
{
auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
return static_cast<TargetT>(fp);
}
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<_SourceT>() &&
cudf::is_fixed_point<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE DeviceT operator()(SourceT const element)
{
return TargetT{element, scale}.value();
}
};
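// Note: fixed_point columns store only the underlying integer representation
// (device_storage_type_t), so the cast functors above and the dispatchers
// below iterate over DeviceT values and attach or strip the scale themselves
// rather than reading fixed_point objects directly.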
template <typename From, typename To>
constexpr inline auto is_supported_non_fixed_point_cast()
{
return cudf::is_fixed_width<To>() &&
// Disallow fixed_point here (requires different specialization)
!(cudf::is_fixed_point<From>() || cudf::is_fixed_point<To>()) &&
// Disallow conversions between timestamps and numeric
!(cudf::is_timestamp<From>() && is_numeric<To>()) &&
!(cudf::is_timestamp<To>() && is_numeric<From>());
}
template <typename From, typename To>
constexpr inline auto is_supported_fixed_point_cast()
{
return (cudf::is_fixed_point<From>() && cudf::is_numeric<To>()) ||
(cudf::is_numeric<From>() && cudf::is_fixed_point<To>()) ||
(cudf::is_fixed_point<From>() && cudf::is_fixed_point<To>());
}
template <typename From, typename To>
constexpr inline auto is_supported_cast()
{
return is_supported_non_fixed_point_cast<From, To>() || is_supported_fixed_point_cast<From, To>();
}
template <typename From, typename To>
struct device_cast {
__device__ To operator()(From element) { return static_cast<To>(element); }
};
/**
* @brief Takes a `fixed_point` column_view as @p input and returns a `fixed_point` column with new
* @p scale
*
* @tparam T Type of the `fixed_point` column_view (`decimal32` or `decimal64`)
* @param input Input `column_view`
* @param scale `scale` of the returned `column`
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @return std::unique_ptr<column> Returned column with new @p scale
*/
template <typename T, typename std::enable_if_t<is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> rescale(column_view input,
numeric::scale_type scale,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
if (input.type().scale() > scale) {
auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::ADD, type, stream, mr);
} else {
auto const diff = input.type().scale() - scale;
auto const scalar = make_fixed_point_scalar<T>(::pow(10, -diff), scale_type{diff});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr);
}
};
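// Worked example, assuming the rep * 10^scale convention of cudf::fixed_point:
// a decimal32 holding 1.23 (rep 123, scale -2) rescaled to scale -1 takes the
// DIV branch above and becomes rep 12, i.e. 1.2, dropping the last digit;
// rescaling 1.2 (rep 12, scale -1) back to scale -2 takes the ADD branch and
// only widens the representation to rep 120.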
template <typename _SourceT>
struct dispatch_unary_cast_to {
column_view input;
dispatch_unary_cast_to(column_view inp) : input(inp) {}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<is_supported_non_fixed_point_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<TargetT>(),
unary_cast<TargetT>{});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<SourceT>;
auto const scale = numeric::scale_type{input.type().scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<DeviceT>(),
input.end<DeviceT>(),
output_mutable.begin<TargetT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_fixed_point<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<TargetT>;
auto const scale = numeric::scale_type{type.scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<DeviceT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
std::is_same_v<SourceT, TargetT>>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.type() == type) return std::make_unique<column>(input); // TODO add test for this
return detail::rescale<TargetT>(input, numeric::scale_type{type.scale()}, stream, mr);
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
not std::is_same_v<SourceT, TargetT>>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
auto const size = input.size();
auto temporary =
std::make_unique<column>(cudf::data_type{type.id(), input.type().scale()},
size,
rmm::device_buffer{size * cudf::size_of(type), stream},
copy_bitmask(input, stream),
input.null_count());
using SourceDeviceT = device_storage_type_t<SourceT>;
using TargetDeviceT = device_storage_type_t<TargetT>;
mutable_column_view output_mutable = *temporary;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceDeviceT>(),
input.end<SourceDeviceT>(),
output_mutable.begin<TargetDeviceT>(),
device_cast<SourceDeviceT, TargetDeviceT>{});
// clearly there is a more efficient way to do this, can optimize in the future
return rescale<TargetT>(*temporary, numeric::scale_type{type.scale()}, stream, mr);
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<not is_supported_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
if (!cudf::is_fixed_width<TargetT>())
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
else if (cudf::is_fixed_point<SourceT>())
CUDF_FAIL("Currently only decimal32/64 to floating point/integral is supported");
else if (cudf::is_timestamp<SourceT>() && is_numeric<TargetT>())
CUDF_FAIL("Timestamps can be created only from duration");
else
CUDF_FAIL("Timestamps cannot be converted to numeric without converting it to a duration");
}
};
struct dispatch_unary_cast_from {
column_view input;
dispatch_unary_cast_from(column_view inp) : input(inp) {}
template <typename T, typename std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!cudf::is_fixed_width<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
}
};
} // anonymous namespace
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width.");
return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, stream, mr);
}
} // namespace detail
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::cast(input, type, rmm::cuda_stream_default, mr);
}
} // namespace cudf
| 307dc2284a8b95464b39f1044efc1faf2dfd8bb3.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/binaryop.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
namespace { // anonymous namespace
template <typename _TargetT>
struct unary_cast {
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element);
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
// Convert source tick counts into target tick counts without blindly truncating them
// by dividing the respective duration time periods (which may not work for time before
// UNIX epoch)
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element.time_since_epoch())};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_duration<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{static_cast<typename TargetT::rep>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element.time_since_epoch())};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_duration<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element.count());
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element)};
}
};
template <typename _SourceT, typename _TargetT>
struct fixed_point_unary_cast {
numeric::scale_type scale;
using FixedPointT = std::conditional_t<cudf::is_fixed_point<_SourceT>(), _SourceT, _TargetT>;
using DeviceT = device_storage_type_t<FixedPointT>;
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_fixed_point<_SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(DeviceT const element)
{
auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
return static_cast<TargetT>(fp);
}
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<_SourceT>() &&
cudf::is_fixed_point<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE DeviceT operator()(SourceT const element)
{
return TargetT{element, scale}.value();
}
};
template <typename From, typename To>
constexpr inline auto is_supported_non_fixed_point_cast()
{
return cudf::is_fixed_width<To>() &&
// Disallow fixed_point here (requires different specialization)
!(cudf::is_fixed_point<From>() || cudf::is_fixed_point<To>()) &&
// Disallow conversions between timestamps and numeric
!(cudf::is_timestamp<From>() && is_numeric<To>()) &&
!(cudf::is_timestamp<To>() && is_numeric<From>());
}
template <typename From, typename To>
constexpr inline auto is_supported_fixed_point_cast()
{
return (cudf::is_fixed_point<From>() && cudf::is_numeric<To>()) ||
(cudf::is_numeric<From>() && cudf::is_fixed_point<To>()) ||
(cudf::is_fixed_point<From>() && cudf::is_fixed_point<To>());
}
template <typename From, typename To>
constexpr inline auto is_supported_cast()
{
return is_supported_non_fixed_point_cast<From, To>() || is_supported_fixed_point_cast<From, To>();
}
template <typename From, typename To>
struct device_cast {
__device__ To operator()(From element) { return static_cast<To>(element); }
};
/**
* @brief Takes a `fixed_point` column_view as @p input and returns a `fixed_point` column with new
* @p scale
*
* @tparam T Type of the `fixed_point` column_view (`decimal32` or `decimal64`)
* @param input Input `column_view`
* @param scale `scale` of the returned `column`
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @return std::unique_ptr<column> Returned column with new @p scale
*/
template <typename T, typename std::enable_if_t<is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> rescale(column_view input,
numeric::scale_type scale,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
if (input.type().scale() > scale) {
auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::ADD, type, stream, mr);
} else {
auto const diff = input.type().scale() - scale;
auto const scalar = make_fixed_point_scalar<T>(std::pow(10, -diff), scale_type{diff});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr);
}
};
template <typename _SourceT>
struct dispatch_unary_cast_to {
column_view input;
dispatch_unary_cast_to(column_view inp) : input(inp) {}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<is_supported_non_fixed_point_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<TargetT>(),
unary_cast<TargetT>{});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<SourceT>;
auto const scale = numeric::scale_type{input.type().scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<DeviceT>(),
input.end<DeviceT>(),
output_mutable.begin<TargetT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_fixed_point<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<TargetT>;
auto const scale = numeric::scale_type{type.scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<DeviceT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
std::is_same_v<SourceT, TargetT>>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.type() == type) return std::make_unique<column>(input); // TODO add test for this
return detail::rescale<TargetT>(input, numeric::scale_type{type.scale()}, stream, mr);
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
not std::is_same_v<SourceT, TargetT>>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
auto const size = input.size();
auto temporary =
std::make_unique<column>(cudf::data_type{type.id(), input.type().scale()},
size,
rmm::device_buffer{size * cudf::size_of(type), stream},
copy_bitmask(input, stream),
input.null_count());
using SourceDeviceT = device_storage_type_t<SourceT>;
using TargetDeviceT = device_storage_type_t<TargetT>;
mutable_column_view output_mutable = *temporary;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceDeviceT>(),
input.end<SourceDeviceT>(),
output_mutable.begin<TargetDeviceT>(),
device_cast<SourceDeviceT, TargetDeviceT>{});
// clearly there is a more efficient way to do this, can optimize in the future
return rescale<TargetT>(*temporary, numeric::scale_type{type.scale()}, stream, mr);
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<not is_supported_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
if (!cudf::is_fixed_width<TargetT>())
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
else if (cudf::is_fixed_point<SourceT>())
CUDF_FAIL("Currently only decimal32/64 to floating point/integral is supported");
else if (cudf::is_timestamp<SourceT>() && is_numeric<TargetT>())
CUDF_FAIL("Timestamps can be created only from duration");
else
CUDF_FAIL("Timestamps cannot be converted to numeric without converting it to a duration");
}
};
struct dispatch_unary_cast_from {
column_view input;
dispatch_unary_cast_from(column_view inp) : input(inp) {}
template <typename T, typename std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!cudf::is_fixed_width<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
}
};
} // anonymous namespace
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width.");
return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, stream, mr);
}
} // namespace detail
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::cast(input, type, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
71e6b0e97f75ee64a6ff01eff72938fe4c291269.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/cuda_helpers.c"
extern "C"{
#include "sum_helpers.c"
}
typedef double(*pointFunction_t)(double,double);
__device__ double max_binary_op(double a,double b){
return fmax(a,b);
}
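// Host code cannot take the address of a __device__ function directly, so the chosen operator is stored in this device-side symbol and copied back with hipMemcpyFromSymbol before the launch.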
__device__ pointFunction_t h_binary_op = max_binary_op;
__global__ void sum_kernel(double *a, double *b ,pointFunction_t p_binary_op ,double *output,int numElements)
{
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index >= numElements){return;}
output[index] = p_binary_op(a[index], b[index]);
}
int
main(void)
{
unsigned const int num_elements = 1<<13;
const size_t size = num_elements*sizeof(double);
double *h_a_orig_input;
double *h_b_orig_input;
double *h_output;
double *h_b_input;
double *h_a_input;
double *d_output;
double *d_a_input;
double *d_b_input;
initialize_host(size,&h_a_input,&h_b_input,&h_output);
for(int i = 0; i < num_elements;i++){
h_a_input[i] = (double)i;
h_b_input[i] = (double)0;
h_output[i] = 0;
}
initialize_device(size,&d_a_input,&d_b_input,&d_output);
copy_host_to_device(size,h_a_input,h_b_input,h_output,d_a_input,d_b_input,d_output);
h_a_orig_input = (double *)malloc(size);
h_b_orig_input = (double *)malloc(size);
memcpy(h_a_orig_input,h_a_input,size);
memcpy(h_b_orig_input,h_b_input,size);
pointFunction_t d_binary_op;
hipMemcpyFromSymbol(&d_binary_op, h_binary_op, sizeof(pointFunction_t));
int threadsPerBlock = 128;
int blocksPerGrid =(num_elements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( sum_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a_input,d_b_input,d_binary_op ,d_output, num_elements);
copy_device_to_host(size,h_a_input,h_b_input,h_output,d_a_input,d_b_input,d_output);
check_binary_op(h_a_orig_input,h_b_orig_input,h_output,num_elements,blocksPerGrid);
printf("DESTROYING\n");
destroy_host(h_a_input,h_b_input,h_output);
destroy_device(d_a_input,d_b_input,d_output);
free(h_a_orig_input);
free(h_b_orig_input);
}
| 71e6b0e97f75ee64a6ff01eff72938fe4c291269.cu |
#include "../common/cuda_helpers.c"
extern "C"{
#include "sum_helpers.c"
}
typedef double(*pointFunction_t)(double,double);
__device__ double max_binary_op(double a,double b){
return fmax(a,b);
}
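// Host code cannot take the address of a __device__ function directly, so the chosen operator is stored in this device-side symbol and copied back with cudaMemcpyFromSymbol before the launch.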
__device__ pointFunction_t h_binary_op = max_binary_op;
__global__ void sum_kernel(double *a, double *b ,pointFunction_t p_binary_op ,double *output,int numElements)
{
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index >= numElements){return;}
output[index] = p_binary_op(a[index], b[index]);
}
int
main(void)
{
unsigned const int num_elements = 1<<13;
const size_t size = num_elements*sizeof(double);
double *h_a_orig_input;
double *h_b_orig_input;
double *h_output;
double *h_b_input;
double *h_a_input;
double *d_output;
double *d_a_input;
double *d_b_input;
initialize_host(size,&h_a_input,&h_b_input,&h_output);
for(int i = 0; i < num_elements;i++){
h_a_input[i] = (double)i;
h_b_input[i] = (double)0;
h_output[i] = 0;
}
initialize_device(size,&d_a_input,&d_b_input,&d_output);
copy_host_to_device(size,h_a_input,h_b_input,h_output,d_a_input,d_b_input,d_output);
h_a_orig_input = (double *)malloc(size);
h_b_orig_input = (double *)malloc(size);
memcpy(h_a_orig_input,h_a_input,size);
memcpy(h_b_orig_input,h_b_input,size);
pointFunction_t d_binary_op;
cudaMemcpyFromSymbol(&d_binary_op, h_binary_op, sizeof(pointFunction_t));
int threadsPerBlock = 128;
int blocksPerGrid =(num_elements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
sum_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_a_input,d_b_input,d_binary_op ,d_output, num_elements);
copy_device_to_host(size,h_a_input,h_b_input,h_output,d_a_input,d_b_input,d_output);
check_binary_op(h_a_orig_input,h_b_orig_input,h_output,num_elements,blocksPerGrid);
printf("DESTROYING\n");
destroy_host(h_a_input,h_b_input,h_output);
destroy_device(d_a_input,d_b_input,d_output);
free(h_a_orig_input);
free(h_b_orig_input);
}
|
8996919fd0da02f10b0323d83e5125f07f7233ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************
*
* cuda-vecadd1.cu - Sum two arrays with CUDA, using thread blocks
*
* Written in 2017 by Moreno Marzolla <moreno.marzolla(at)unibo.it>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to the
* public domain worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication
* along with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*
* ---------------------------------------------------------------------------
*
* Based on the example shown in the CUDA toolkit documentation
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/
*
* Compile with:
* nvcc cuda-vecadd1.cu -o cuda-vecadd1
*
* Run with:
* ./cuda-vecadd1
*
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
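/* Each array element is handled by its own single-thread block: the kernel is
   launched with N blocks of one thread, so blockIdx.x selects the element. */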
__global__ void add( int *a, int *b, int *c )
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
void vec_init( int *a, int n )
{
int i;
for (i=0; i<n; i++) {
a[i] = i;
}
}
#define N 512
int main( void )
{
int *a, *b, *c; /* host copies of a, b, c */
int *d_a, *d_b, *d_c; /* device copies of a, b, c */
int i;
const size_t size = N*sizeof(int);
/* Allocate space for device copies of a, b, c */
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
/* Allocate space for host copies of a, b, c */
a = (int*)malloc(size); vec_init(a, N);
b = (int*)malloc(size); vec_init(b, N);
c = (int*)malloc(size);
/* Copy inputs to device */
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
/* Launch add() kernel on GPU */
printf("Adding %d elements\n", N);
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, d_a, d_b, d_c);
/* Copy result back to host */
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
/* Check results */
for (i=0; i<N; i++) {
if ( c[i] != a[i] + b[i] ) {
printf("Error at index %d: a[%d]=%d, b[%d]=%d, c[%d]=%d\n",
i, i, a[i], i, b[i], i, c[i]);
return -1;
}
}
printf("Test OK\n");
/* Cleanup */
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| 8996919fd0da02f10b0323d83e5125f07f7233ec.cu | /****************************************************************************
*
* cuda-vecadd1.cu - Sum two arrays with CUDA, using thread blocks
*
* Written in 2017 by Moreno Marzolla <moreno.marzolla(at)unibo.it>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to the
* public domain worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication
* along with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*
* ---------------------------------------------------------------------------
*
* Based on the example shown in the CUDA toolkit documentation
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/
*
* Compile with:
* nvcc cuda-vecadd1.cu -o cuda-vecadd1
*
* Run with:
* ./cuda-vecadd1
*
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
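/* Each array element is handled by its own single-thread block: the kernel is
   launched with N blocks of one thread, so blockIdx.x selects the element. */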
__global__ void add( int *a, int *b, int *c )
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
void vec_init( int *a, int n )
{
int i;
for (i=0; i<n; i++) {
a[i] = i;
}
}
#define N 512
int main( void )
{
int *a, *b, *c; /* host copies of a, b, c */
int *d_a, *d_b, *d_c; /* device copies of a, b, c */
int i;
const size_t size = N*sizeof(int);
/* Allocate space for device copies of a, b, c */
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
/* Allocate space for host copies of a, b, c */
a = (int*)malloc(size); vec_init(a, N);
b = (int*)malloc(size); vec_init(b, N);
c = (int*)malloc(size);
/* Copy inputs to device */
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
/* Launch add() kernel on GPU */
printf("Adding %d elements\n", N);
add<<<N,1>>>(d_a, d_b, d_c);
/* Copy result back to host */
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
/* Check results */
for (i=0; i<N; i++) {
if ( c[i] != a[i] + b[i] ) {
printf("Error at index %d: a[%d]=%d, b[%d]=%d, c[%d]=%d\n",
i, i, a[i], i, b[i], i, c[i]);
return -1;
}
}
printf("Test OK\n");
/* Cleanup */
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
03b92afda060b04d2a71a9394dfcfa4cee2a61d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ void mul(double a, double b, double *res)
{
*res = a * b;
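  // Each FPC_ERROR_* block below deliberately injects an extra operation (adding/subtracting 1e308, multiplying by a subnormal, or computing 0/0), presumably to exercise a floating-point error checker.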
#ifdef FPC_ERROR_ADD
*res = *res + 1e308;
#endif
#ifdef FPC_ERROR_SUB
*res = *res - (1e308);
#endif
#ifdef FPC_ERROR_MUL
*res = *res * 1e-323;
#endif
#ifdef FPC_ERROR_DIV
*res = (*res - *res) / (*res - *res);
#endif
}
__global__ void dot_prod(double *x, double *y, int size)
{
  double d = 0.0;  // accumulator must start at zero before the loop below
for (int i=0; i < size; ++i)
{
double tmp;
mul(x[i], y[i], &tmp);
d += tmp;
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
| 03b92afda060b04d2a71a9394dfcfa4cee2a61d4.cu |
#include <stdio.h>
__device__ void mul(double a, double b, double *res)
{
*res = a * b;
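  // Each FPC_ERROR_* block below deliberately injects an extra operation (adding/subtracting 1e308, multiplying by a subnormal, or computing 0/0), presumably to exercise a floating-point error checker.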
#ifdef FPC_ERROR_ADD
*res = *res + 1e308;
#endif
#ifdef FPC_ERROR_SUB
*res = *res - (1e308);
#endif
#ifdef FPC_ERROR_MUL
*res = *res * 1e-323;
#endif
#ifdef FPC_ERROR_DIV
*res = (*res - *res) / (*res - *res);
#endif
}
__global__ void dot_prod(double *x, double *y, int size)
{
  double d = 0.0;  // accumulator must start at zero before the loop below
for (int i=0; i < size; ++i)
{
double tmp;
mul(x[i], y[i], &tmp);
d += tmp;
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
|
6ac02cf5249160c1d35c36d785f3068b4faffb60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
When using this code in a scientific project, please cite one or all of the
following papers:
 * Daniel Ruijters and Philippe Thévenaz,
GPU Prefilter for Accurate Cubic B-Spline Interpolation,
The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012.
http://dannyruijters.nl/docs/cudaPrefilter3.pdf
* Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens,
Efficient GPU-Based Texture Interpolation using Uniform B-Splines,
Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008.
\*--------------------------------------------------------------------------*/
#ifndef _3D_CUBIC_BSPLINE_PREFILTER_H_
#define _3D_CUBIC_BSPLINE_PREFILTER_H_
#include "internal/cubicPrefilter_kernel.cu"
#include <stdio.h>
extern "C" {
//--------------------------------------------------------------------------
// Global CUDA procedures
//--------------------------------------------------------------------------
__global__ void SamplesToCoefficients3DX(float *volume, // in-place processing
uint pitch, // width in bytes
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
const uint z = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = (z * height + y) * pitch;
float *ptr = (float *)((uchar *)volume + startIdx);
ConvertToInterpolationCoefficients(ptr, width, sizeof(float));
}
__global__ void SamplesToCoefficients3DY(float *volume, // in-place processing
uint pitch, // width in bytes
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in y-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint z = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = z * height * pitch;
float *ptr = (float *)((uchar *)volume + startIdx);
ConvertToInterpolationCoefficients(ptr + x, height, pitch);
}
__global__ void SamplesToCoefficients3DZ(float *volume, // in-place processing
uint pitch, // width in bytes
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in z-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint y = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = y * pitch;
const uint slice = height * pitch;
float *ptr = (float *)((uchar *)volume + startIdx);
ConvertToInterpolationCoefficients(ptr + x, depth, slice);
}
}
#endif //_3D_CUBIC_BSPLINE_PREFILTER_H_
| 6ac02cf5249160c1d35c36d785f3068b4faffb60.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
When using this code in a scientific project, please cite one or all of the
following papers:
 * Daniel Ruijters and Philippe Thévenaz,
GPU Prefilter for Accurate Cubic B-Spline Interpolation,
The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012.
http://dannyruijters.nl/docs/cudaPrefilter3.pdf
* Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens,
Efficient GPU-Based Texture Interpolation using Uniform B-Splines,
Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008.
\*--------------------------------------------------------------------------*/
#ifndef _3D_CUBIC_BSPLINE_PREFILTER_H_
#define _3D_CUBIC_BSPLINE_PREFILTER_H_
#include "internal/cubicPrefilter_kernel.cu"
#include <stdio.h>
extern "C" {
//--------------------------------------------------------------------------
// Global CUDA procedures
//--------------------------------------------------------------------------
__global__ void SamplesToCoefficients3DX(float *volume, // in-place processing
uint pitch, // width in bytes
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
const uint z = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = (z * height + y) * pitch;
float *ptr = (float *)((uchar *)volume + startIdx);
ConvertToInterpolationCoefficients(ptr, width, sizeof(float));
}
__global__ void SamplesToCoefficients3DY(float *volume, // in-place processing
uint pitch, // width in bytes
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in y-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint z = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = z * height * pitch;
float *ptr = (float *)((uchar *)volume + startIdx);
ConvertToInterpolationCoefficients(ptr + x, height, pitch);
}
__global__ void SamplesToCoefficients3DZ(float *volume, // in-place processing
uint pitch, // width in bytes
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in z-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint y = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = y * pitch;
const uint slice = height * pitch;
float *ptr = (float *)((uchar *)volume + startIdx);
ConvertToInterpolationCoefficients(ptr + x, depth, slice);
}
}
#endif //_3D_CUBIC_BSPLINE_PREFILTER_H_
|
db8bebef38116d5bb2432bb28446323523eec540.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_BEST_KERNEL_CU_
#define _SCAN_BEST_KERNEL_CU_
// Define this to more rigorously avoid bank conflicts,
// even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance
// is lower with ZERO_BANK_CONFLICTS enabled. It is provided
// as an example.
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
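// Worked example for one 8-element block (exclusive scan):
//   input  [3 1 7 0 4 1 6 3]  ->  output [0 3 4 11 11 15 16 22]
// With storeSum enabled the block total (25) is also written to g_blockSums.
//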
template <bool isNP2> __device__ void loadSharedChunkFromMem (float *s_data, const float *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB )
{
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
bankOffsetA = CONFLICT_FREE_OFFSET(ai); // compute spacing to avoid bank conflicts
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = g_idata[mem_ai]; // Cache the computational window in shared memory pad values beyond n with zeros
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2> __device__ void loadSharedChunkFromMemInt (int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB )
{
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
bankOffsetA = CONFLICT_FREE_OFFSET(ai); // compute spacing to avoid bank conflicts
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = g_idata[mem_ai]; // Cache the computational window in shared memory pad values beyond n with zeros
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2> __device__ void storeSharedChunkToMem(float* g_odata, const float* s_data, int n, int ai, int bi, int mem_ai, int mem_bi,int bankOffsetA, int bankOffsetB)
{
__syncthreads();
g_odata[mem_ai] = s_data[ai + bankOffsetA]; // write results to global memory
if (isNP2) { // compile-time decision
if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool isNP2> __device__ void storeSharedChunkToMemInt (int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi,int bankOffsetA, int bankOffsetB)
{
__syncthreads();
g_odata[mem_ai] = s_data[ai + bankOffsetA]; // write results to global memory
if (isNP2) { // compile-time decision
if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum> __device__ void clearLastElement( float* s_data, float *g_blockSums, int blockIndex)
{
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
s_data[index] = 0; // zero the last element in the scan so it will propagate back to the front
}
}
template <bool storeSum> __device__ void clearLastElementInt ( int* s_data, int *g_blockSums, int blockIndex)
{
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
s_data[index] = 0; // zero the last element in the scan so it will propagate back to the front
}
}
__device__ unsigned int buildSum(float *s_data)
{
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ unsigned int buildSumInt (int *s_data)
{
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ void scanRootToLeaves(float *s_data, unsigned int stride)
{
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
__device__ void scanRootToLeavesInt (int *s_data, unsigned int stride)
{
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum> __device__ void prescanBlock(float *data, int blockIndex, float *blockSums)
{
int stride = buildSum (data); // build the sum in place up the tree
clearLastElement<storeSum> (data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves (data, stride); // traverse down tree to build the scan
}
template <bool storeSum> __device__ void prescanBlockInt (int *data, int blockIndex, int *blockSums)
{
int stride = buildSumInt (data); // build the sum in place up the tree
clearLastElementInt <storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeavesInt (data, stride); // traverse down tree to build the scan
}
__global__ void uniformAdd (float *g_data, float *uniforms, int n, int blockOffset, int baseIndex)
{
__shared__ float uni;
if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
__global__ void uniformAddInt (int *g_data, int *uniforms, int n, int blockOffset, int baseIndex)
{
__shared__ int uni;
if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
#endif // #ifndef _SCAN_BEST_KERNEL_CU_
| db8bebef38116d5bb2432bb28446323523eec540.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_BEST_KERNEL_CU_
#define _SCAN_BEST_KERNEL_CU_
// Define this to more rigorously avoid bank conflicts,
// even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance
// is lower with ZERO_BANK_CONFLICTS enabled. It is provided
// as an example.
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
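// Worked example for one 8-element block (exclusive scan):
//   input  [3 1 7 0 4 1 6 3]  ->  output [0 3 4 11 11 15 16 22]
// With storeSum enabled the block total (25) is also written to g_blockSums.
//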
template <bool isNP2> __device__ void loadSharedChunkFromMem (float *s_data, const float *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB )
{
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
bankOffsetA = CONFLICT_FREE_OFFSET(ai); // compute spacing to avoid bank conflicts
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = g_idata[mem_ai]; // Cache the computational window in shared memory pad values beyond n with zeros
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2> __device__ void loadSharedChunkFromMemInt (int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB )
{
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
bankOffsetA = CONFLICT_FREE_OFFSET(ai); // compute spacing to avoid bank conflicts
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = g_idata[mem_ai]; // Cache the computational window in shared memory pad values beyond n with zeros
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2> __device__ void storeSharedChunkToMem(float* g_odata, const float* s_data, int n, int ai, int bi, int mem_ai, int mem_bi,int bankOffsetA, int bankOffsetB)
{
__syncthreads();
g_odata[mem_ai] = s_data[ai + bankOffsetA]; // write results to global memory
if (isNP2) { // compile-time decision
if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool isNP2> __device__ void storeSharedChunkToMemInt (int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi,int bankOffsetA, int bankOffsetB)
{
__syncthreads();
g_odata[mem_ai] = s_data[ai + bankOffsetA]; // write results to global memory
if (isNP2) { // compile-time decision
if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum> __device__ void clearLastElement( float* s_data, float *g_blockSums, int blockIndex)
{
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
s_data[index] = 0; // zero the last element in the scan so it will propagate back to the front
}
}
template <bool storeSum> __device__ void clearLastElementInt ( int* s_data, int *g_blockSums, int blockIndex)
{
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
s_data[index] = 0; // zero the last element in the scan so it will propagate back to the front
}
}
__device__ unsigned int buildSum(float *s_data)
{
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ unsigned int buildSumInt (int *s_data)
{
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ void scanRootToLeaves(float *s_data, unsigned int stride)
{
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
__device__ void scanRootToLeavesInt (int *s_data, unsigned int stride)
{
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum> __device__ void prescanBlock(float *data, int blockIndex, float *blockSums)
{
int stride = buildSum (data); // build the sum in place up the tree
clearLastElement<storeSum> (data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves (data, stride); // traverse down tree to build the scan
}
template <bool storeSum> __device__ void prescanBlockInt (int *data, int blockIndex, int *blockSums)
{
int stride = buildSumInt (data); // build the sum in place up the tree
clearLastElementInt <storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeavesInt (data, stride); // traverse down tree to build the scan
}
__global__ void uniformAdd (float *g_data, float *uniforms, int n, int blockOffset, int baseIndex)
{
__shared__ float uni;
if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
__global__ void uniformAddInt (int *g_data, int *uniforms, int n, int blockOffset, int baseIndex)
{
__shared__ int uni;
if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
#endif // #ifndef _SCAN_BEST_KERNEL_CU_
|
1ef8bea9d1b1215a42ebb8502e69321a0116e560.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021-2023, XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <xgboost/host_device_vector.h>
#include "../helpers.h"
#include "../../../src/data/array_interface.h"
namespace xgboost {
__global__ void SleepForTest(uint64_t *out, uint64_t duration) {
auto start = clock64();
auto t = 0;
while (t < duration) {
t = clock64() - start;
}
out[0] = t;
}
TEST(ArrayInterface, Stream) {
size_t constexpr kRows = 10, kCols = 10;
HostDeviceVector<float> storage;
auto arr_str = RandomDataGenerator{kRows, kCols, 0}.GenerateArrayInterface(&storage);
dh::HIPStreamMasqueradingAsCUDA stream;
auto j_arr = Json::Load(StringView{arr_str});
j_arr["stream"] = Integer(reinterpret_cast<int64_t>(stream.Handle()));
Json::Dump(j_arr, &arr_str);
dh::caching_device_vector<uint64_t> out(1, 0);
std::uint64_t dur = 1e9;
dh::LaunchKernel{1, 1, 0, stream.View()}(SleepForTest, out.data().get(), dur);
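  // Parsing the array interface below is expected to synchronize on the stream recorded in the JSON, so the sleep kernel must have finished before out[0] is read back.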
ArrayInterface<2> arr(arr_str);
auto t = out[0];
CHECK_GE(t, dur);
}
TEST(ArrayInterface, Ptr) {
std::vector<float> h_data(10);
ASSERT_FALSE(ArrayInterfaceHandler::IsCudaPtr(h_data.data()));
dh::safe_cuda(hipGetLastError());
dh::device_vector<float> d_data(10);
ASSERT_TRUE(ArrayInterfaceHandler::IsCudaPtr(d_data.data().get()));
dh::safe_cuda(hipGetLastError());
ASSERT_FALSE(ArrayInterfaceHandler::IsCudaPtr(nullptr));
dh::safe_cuda(hipGetLastError());
}
} // namespace xgboost
| 1ef8bea9d1b1215a42ebb8502e69321a0116e560.cu | /**
* Copyright 2021-2023, XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <xgboost/host_device_vector.h>
#include "../helpers.h"
#include "../../../src/data/array_interface.h"
namespace xgboost {
__global__ void SleepForTest(uint64_t *out, uint64_t duration) {
auto start = clock64();
auto t = 0;
while (t < duration) {
t = clock64() - start;
}
out[0] = t;
}
TEST(ArrayInterface, Stream) {
size_t constexpr kRows = 10, kCols = 10;
HostDeviceVector<float> storage;
auto arr_str = RandomDataGenerator{kRows, kCols, 0}.GenerateArrayInterface(&storage);
dh::CUDAStream stream;
auto j_arr = Json::Load(StringView{arr_str});
j_arr["stream"] = Integer(reinterpret_cast<int64_t>(stream.Handle()));
Json::Dump(j_arr, &arr_str);
dh::caching_device_vector<uint64_t> out(1, 0);
std::uint64_t dur = 1e9;
dh::LaunchKernel{1, 1, 0, stream.View()}(SleepForTest, out.data().get(), dur);
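  // Parsing the array interface below is expected to synchronize on the stream recorded in the JSON, so the sleep kernel must have finished before out[0] is read back.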
ArrayInterface<2> arr(arr_str);
auto t = out[0];
CHECK_GE(t, dur);
}
TEST(ArrayInterface, Ptr) {
std::vector<float> h_data(10);
ASSERT_FALSE(ArrayInterfaceHandler::IsCudaPtr(h_data.data()));
dh::safe_cuda(cudaGetLastError());
dh::device_vector<float> d_data(10);
ASSERT_TRUE(ArrayInterfaceHandler::IsCudaPtr(d_data.data().get()));
dh::safe_cuda(cudaGetLastError());
ASSERT_FALSE(ArrayInterfaceHandler::IsCudaPtr(nullptr));
dh::safe_cuda(cudaGetLastError());
}
} // namespace xgboost
|
bf7573b2f5ff1052d2af68962fd056814b2bd896.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
#include <stdio.h>
#include <windows.h>
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include "Shlwapi.h"
#include <hip/hip_runtime.h>
#include <iomanip>
#include "helper_image.h"
using namespace std;
#pragma intrinsic(__rdtsc)
#define BLOCK_SIZE_X 128
#define BLOCK_SIZE_Y 8
#define CHECK_ERROR( call ) \
{ \
hipError_t result = call; \
if ( hipSuccess != result ) { \
cerr << "CUDA error " << result << " in " << __FILE__ << ":" << __LINE__ << ": " << hipGetErrorString( result ) << " (" << #call << ")" << endl; \
exit(1); } \
}
const int kernel[3][3] = {
{ 1, 1, 1 },
{ 1, -8, 1 },
{ 1, 1, 1 },
};
__device__ __constant__ int kernelGPU[3][3] = {
{ 1, 1, 1 },
{ 1, -8, 1 },
{ 1, 1, 1 },
};
__global__ void filter_kernel(BYTE* inputBitmap, BYTE* outputBitmap, int height, int width, int channels) {
const int xIndex = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
const int yIndex = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
if (xIndex >= width * channels || yIndex >= height)
return;
int offsetX, offsetY, absX, absY;
int sum = 0;
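  // Pixels are channel-interleaved; the horizontal offset below steps by whole pixels (the hard-coded 3 effectively assumes an RGB image), so each colour channel is filtered independently. Out-of-range neighbours are replaced by the centre sample.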
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
offsetX = (j * 3 - channels);
offsetY = (i - 1);
absX = xIndex + offsetX;
absY = yIndex + offsetY;
if (absX < 0 || absX >= width * channels) absX = xIndex;
if (absY < 0 || absY >= height) absY = yIndex;
sum += inputBitmap[absX + absY * width * channels] * kernelGPU[i][j];
}
}
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
outputBitmap[xIndex + yIndex * width * channels] = sum;
}
BYTE* filter_CPU(BYTE* pixelData, int width, int height, int channels) {
BYTE* result = new BYTE[width * channels * height];
if (result == NULL)
return NULL;
int pos;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width * channels; x++)
{
int sum = 0;
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
int X = x + (j * 3 - channels);
int Y = y + (i - 1);
if (X >= width * channels || X < 0) X = x;
if (Y == height || Y == -1) Y = y;
pos = width * channels * Y + X;
int kernelVal = kernel[i][j];
sum += pixelData[pos] * kernelVal;
}
}
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
pos = width * channels * y + x;
result[pos] = (byte)sum;
}
}
return result;
}
BYTE* filter_GPU(BYTE* pixelData, int width, int height, int channels)
{
float timeGPU = NULL;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
size_t size = width * channels * height;
BYTE* result = new BYTE[size];
BYTE* pixelDataGPU, * resultGPU;
CHECK_ERROR(hipMalloc((void**)&pixelDataGPU, size));
CHECK_ERROR(hipMalloc((void**)&resultGPU, size));
CHECK_ERROR(hipMemcpy(pixelDataGPU, pixelData, size, hipMemcpyHostToDevice));
int gridSize_X = (int)ceil((double)width * channels / (double)BLOCK_SIZE_X);
int gridSize_Y = (int)ceil((double)height / (double)BLOCK_SIZE_Y);
dim3 dimGrid(gridSize_X, gridSize_Y);
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
CHECK_ERROR(hipEventRecord(start));
filter_kernel << <dimGrid, dimBlock >> > (pixelDataGPU, resultGPU, height, width, channels);
hipMemcpy(result, resultGPU, size, hipMemcpyDeviceToHost);
CHECK_ERROR(hipEventRecord(stop));
CHECK_ERROR(hipEventSynchronize(stop));
CHECK_ERROR(hipEventElapsedTime(&timeGPU, start, stop));
cout << setw(30) << left << "GPU time: " << timeGPU << " ms" << endl;
CHECK_ERROR(hipEventDestroy(start));
CHECK_ERROR(hipEventDestroy(stop));
CHECK_ERROR(hipFree(pixelDataGPU));
CHECK_ERROR(hipFree(resultGPU));
CHECK_ERROR(hipDeviceReset());
return result;
}
bool isEquals(BYTE* a, BYTE* b, int width, int height, int channels) {
for (int i = 0; i < width * channels; i++)
for (int j = 0; j < height; j++)
if (a[i + j * width * channels] != b[i + j * width * channels]) {
return false;
}
return true;
}
int main() {
unsigned int width = 0, height = 0, channels;
const char srcImage[] = "nature.ppm";
const char imageCPU[] = "imageCPU.pgm";
const char imageGPU[] = "imageGPU.pgm";
BYTE* srcData = NULL, * GPUData = NULL, * CPUData = NULL;
__loadPPM(srcImage, &srcData, &width, &height, &channels);
auto start_time = __rdtsc();
CPUData = filter_CPU(srcData, width, height, channels);
auto end_time = __rdtsc();
cout << setw(30) << left << "CPU time: " << (end_time - start_time) / 3590000 << " ms" << endl;
GPUData = filter_GPU(srcData, width, height, channels);
isEquals(CPUData, GPUData, width, height, channels) ? cout << "equals" << endl : cout << "not equals" << endl;
__savePPM(imageCPU, CPUData, width, height, channels);
__savePPM(imageGPU, GPUData, width, height, channels);
free(srcData);
free(GPUData);
free(CPUData);
return 0;
} | bf7573b2f5ff1052d2af68962fd056814b2bd896.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
#include <stdio.h>
#include <windows.h>
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include "Shlwapi.h"
#include <cuda.h>
#include <iomanip>
#include "helper_image.h"
using namespace std;
#pragma intrinsic(__rdtsc)
#define BLOCK_SIZE_X 128
#define BLOCK_SIZE_Y 8
#define CHECK_ERROR( call ) \
{ \
cudaError_t result = call; \
if ( cudaSuccess != result ) { \
cerr << "CUDA error " << result << " in " << __FILE__ << ":" << __LINE__ << ": " << cudaGetErrorString( result ) << " (" << #call << ")" << endl; \
exit(1); } \
}
const int kernel[3][3] = {
{ 1, 1, 1 },
{ 1, -8, 1 },
{ 1, 1, 1 },
};
__device__ __constant__ int kernelGPU[3][3] = {
{ 1, 1, 1 },
{ 1, -8, 1 },
{ 1, 1, 1 },
};
__global__ void filter_kernel(BYTE* inputBitmap, BYTE* outputBitmap, int height, int width, int channels) {
const int xIndex = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
const int yIndex = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
if (xIndex >= width * channels || yIndex >= height)
return;
int offsetX, offsetY, absX, absY;
int sum = 0;
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
offsetX = (j * 3 - channels);
offsetY = (i - 1);
absX = xIndex + offsetX;
absY = yIndex + offsetY;
if (absX < 0 || absX >= width * channels) absX = xIndex;
if (absY < 0 || absY >= height) absY = yIndex;
sum += inputBitmap[absX + absY * width * channels] * kernelGPU[i][j];
}
}
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
outputBitmap[xIndex + yIndex * width * channels] = sum;
}
BYTE* filter_CPU(BYTE* pixelData, int width, int height, int channels) {
BYTE* result = new BYTE[width * channels * height];
if (result == NULL)
return NULL;
int pos;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width * channels; x++)
{
int sum = 0;
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
int X = x + (j * 3 - channels);
int Y = y + (i - 1);
if (X >= width * channels || X < 0) X = x;
if (Y == height || Y == -1) Y = y;
pos = width * channels * Y + X;
int kernelVal = kernel[i][j];
sum += pixelData[pos] * kernelVal;
}
}
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
pos = width * channels * y + x;
result[pos] = (byte)sum;
}
}
return result;
}
BYTE* filter_GPU(BYTE* pixelData, int width, int height, int channels)
{
float timeGPU = NULL;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
size_t size = width * channels * height;
BYTE* result = new BYTE[size];
BYTE* pixelDataGPU, * resultGPU;
CHECK_ERROR(cudaMalloc((void**)&pixelDataGPU, size));
CHECK_ERROR(cudaMalloc((void**)&resultGPU, size));
CHECK_ERROR(cudaMemcpy(pixelDataGPU, pixelData, size, cudaMemcpyHostToDevice));
int gridSize_X = (int)ceil((double)width * channels / (double)BLOCK_SIZE_X);
int gridSize_Y = (int)ceil((double)height / (double)BLOCK_SIZE_Y);
dim3 dimGrid(gridSize_X, gridSize_Y);
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
CHECK_ERROR(cudaEventRecord(start));
filter_kernel << <dimGrid, dimBlock >> > (pixelDataGPU, resultGPU, height, width, channels);
cudaMemcpy(result, resultGPU, size, cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaEventRecord(stop));
CHECK_ERROR(cudaEventSynchronize(stop));
CHECK_ERROR(cudaEventElapsedTime(&timeGPU, start, stop));
cout << setw(30) << left << "GPU time: " << timeGPU << " ms" << endl;
CHECK_ERROR(cudaEventDestroy(start));
CHECK_ERROR(cudaEventDestroy(stop));
CHECK_ERROR(cudaFree(pixelDataGPU));
CHECK_ERROR(cudaFree(resultGPU));
CHECK_ERROR(cudaDeviceReset());
return result;
}
bool isEquals(BYTE* a, BYTE* b, int width, int height, int channels) {
for (int i = 0; i < width * channels; i++)
for (int j = 0; j < height; j++)
if (a[i + j * width * channels] != b[i + j * width * channels]) {
return false;
}
return true;
}
int main() {
unsigned int width = 0, height = 0, channels;
const char srcImage[] = "nature.ppm";
const char imageCPU[] = "imageCPU.pgm";
const char imageGPU[] = "imageGPU.pgm";
BYTE* srcData = NULL, * GPUData = NULL, * CPUData = NULL;
__loadPPM(srcImage, &srcData, &width, &height, &channels);
auto start_time = __rdtsc();
CPUData = filter_CPU(srcData, width, height, channels);
auto end_time = __rdtsc();
cout << setw(30) << left << "CPU time: " << (end_time - start_time) / 3590000 << " ms" << endl;
GPUData = filter_GPU(srcData, width, height, channels);
isEquals(CPUData, GPUData, width, height, channels) ? cout << "equals" << endl : cout << "not equals" << endl;
__savePPM(imageCPU, CPUData, width, height, channels);
__savePPM(imageGPU, GPUData, width, height, channels);
free(srcData);
free(GPUData);
free(CPUData);
return 0;
} |
fac9a512a632350322903d932b2608bfe006609d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
/*
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
*/
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
// stop out of bounds access
if (my_index < num_elements) {
if (my_index%2 == 1) {
// even threads write their index to their array entry
input_array[my_index] = my_index;
} else {
// odd threads copy their value from the next array entry
input_array[my_index] = input_array[my_index+1];
}
}
}
/*
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = 100;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
hipMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 1;
}
// copy them to the GPU
hipMemcpy(device_array, host_array, num_bytes, hipMemcpyHostToDevice);
// define block and grid sizes
int block_size = 128;
int grid_size = (num_elements + block_size - 1) / block_size;
// run GPU code
device_global<<<grid_size, block_size>>>(device_array, num_elements);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// copy output to host
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
// print any information
for (int i=0; i<num_elements; i++) {
printf("%03u, ", host_array[i]);
if (i%10 == 9) {
printf(" \n");
}
}
// free memory
free(host_array);
hipFree(device_array);
}
*/
| fac9a512a632350322903d932b2608bfe006609d.cu | #include <stdio.h>
// from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
/*
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
*/
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
// stop out of bounds access
if (my_index < num_elements) {
if (my_index%2 == 1) {
// even threads write their index to their array entry
input_array[my_index] = my_index;
} else {
// odd threads copy their value from the next array entry
input_array[my_index] = input_array[my_index+1];
}
}
}
/*
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = 100;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
cudaMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 1;
}
// copy them to the GPU
cudaMemcpy(device_array, host_array, num_bytes, cudaMemcpyHostToDevice);
// define block and grid sizes
int block_size = 128;
int grid_size = (num_elements + block_size - 1) / block_size;
// run GPU code
device_global<<<grid_size, block_size>>>(device_array, num_elements);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// copy output to host
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
// print any information
for (int i=0; i<num_elements; i++) {
printf("%03u, ", host_array[i]);
if (i%10 == 9) {
printf(" \n");
}
}
// free memory
free(host_array);
cudaFree(device_array);
}
*/
|
3dd2f0a2c9cc7cc57c3d3377b9d705a0343cb162.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define N 10000000
__global__ void vector_add(float *out, float *a, float *b, int n){
for(int i=0;i<n;i++){
out[i]=a[i]+b[i];
}
}
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
a=(float*)malloc(sizeof(float)*N);
b=(float*)malloc(sizeof(float)*N);
out=(float*)malloc(sizeof(float)*N);
for(int i=0; i<N; i++){
a[i]=1.0f; b[i]=2.0f;
}
hipMalloc((void**)&d_a,sizeof(float)*N);
hipMalloc((void**)&d_b,sizeof(float)*N);
hipMalloc((void**)&d_out,sizeof(float)*N);
hipMemcpy(d_a, a, sizeof(float)*N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float)*N, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vector_add), dim3(1),dim3(1), 0, 0, d_out, d_a, d_b, N);
hipMemcpy(out, d_out, sizeof(float)*N, hipMemcpyDeviceToHost);
printf("%f\n", out[0]);
// Deallocate device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
return 0;
}
| 3dd2f0a2c9cc7cc57c3d3377b9d705a0343cb162.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define N 10000000
__global__ void vector_add(float *out, float *a, float *b, int n){
for(int i=0;i<n;i++){
out[i]=a[i]+b[i];
}
}
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
a=(float*)malloc(sizeof(float)*N);
b=(float*)malloc(sizeof(float)*N);
out=(float*)malloc(sizeof(float)*N);
for(int i=0; i<N; i++){
a[i]=1.0f; b[i]=2.0f;
}
cudaMalloc((void**)&d_a,sizeof(float)*N);
cudaMalloc((void**)&d_b,sizeof(float)*N);
cudaMalloc((void**)&d_out,sizeof(float)*N);
cudaMemcpy(d_a, a, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float)*N, cudaMemcpyHostToDevice);
vector_add<<<1,1>>>(d_out, d_a, d_b, N);
cudaMemcpy(out, d_out, sizeof(float)*N, cudaMemcpyDeviceToHost);
printf("%f\n", out[0]);
// Deallocate device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
return 0;
}
|
0d02294b7a3fc17facf03092e3e0428fb7529869.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
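/* Note: the kernel below fuses four successive applications of a 5x5 stencil whose
   row weights are (2,4,5,4,2), (4,9,12,9,4) and (5,12,15,12,5), each normalized by 159 --
   the Gaussian smoothing mask familiar from Canny edge detection. Each thread block
   streams down the rows of its column strip, keeping per-stage partial sums for the
   neighbouring rows in registers (t*_*/b*_*) and shared-memory tiles (__tilevar_*__),
   and rotating them after every row (the "Now rotate" steps). */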
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double t2_0=0.0f, t2_1=0.0f, t3_0=0.0f, t3_1=0.0f, t4_0=0.0f, t4_1=0.0f, t5_0=0.0f, t5_1=0.0f, out=0.0f;
double b2_0=0.0f, b2_1=0.0f, b3_0=0.0f, b3_1=0.0f, b4_0=0.0f, b4_1=0.0f, b5_0=0.0f, b5_1=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-16);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
//Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-8); __iter_1__ <= __iter_y__+7; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
// Rest of the computation
for (int __iter_1__ = __iter_y__+8; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+7); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-8,0)] = out;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void gaussian(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
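  // Grid sizing note: each block produces blockDim.x-16 output columns; the extra 16
  // columns are halo consumed by the four cascaded radius-2 stencil stages (8 per side).
  // The y dimension streams over rows, with FORMA_BLOCKDIM_Y passed as M/32 rows per block.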
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/32);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| 0d02294b7a3fc17facf03092e3e0428fb7529869.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double t2_0=0.0f, t2_1=0.0f, t3_0=0.0f, t3_1=0.0f, t4_0=0.0f, t4_1=0.0f, t5_0=0.0f, t5_1=0.0f, out=0.0f;
double b2_0=0.0f, b2_1=0.0f, b3_0=0.0f, b3_1=0.0f, b4_0=0.0f, b4_1=0.0f, b5_0=0.0f, b5_1=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-16);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
//Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-8); __iter_1__ <= __iter_y__+7; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
// Rest of the computation
for (int __iter_1__ = __iter_y__+8; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+7); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
double __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_10__ = (__temp_6__ + 5 * __temp_9__);
double __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_14__ = (__temp_10__ + 4 * __temp_13__);
double __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
double __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
double __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_30__ = (__temp_26__ + 12 * __temp_29__);
double __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_34__ = (__temp_30__ + 9 * __temp_33__);
double __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
double __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
double __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_50__ = (__temp_46__ + 15 * __temp_49__);
double __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_54__ = (__temp_50__ + 12 * __temp_53__);
double __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
double __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
double __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_70__ = (__temp_66__ + 12 * __temp_69__);
double __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_74__ = (__temp_70__ + 9 * __temp_73__);
double __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
double __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
double __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
double __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
double __temp_90__ = (__temp_86__ + 5 * __temp_89__);
double __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_94__ = (__temp_90__ + 4 * __temp_93__);
double __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
double __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-8,0)] = out;
}
__syncthreads();
    // Rotate the register pipeline: write each stage's oldest partial sum back to its shared tile row (or to `out` for the final stage), shift the remaining accumulators down one time step, and clear the newest one.
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
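// Illustrative note (added comment, not part of the generated code): with the
// 128-thread block configured in the host code below, this helper requests
// sizeof(double) * 4 * 128 = 4096 bytes of dynamic shared memory per block --
// one row of doubles for each of the kernel's four shared tile buffers.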
/*Device code End */
/* Host Code Begin */
extern "C" void gaussian(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/32);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
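/*
 * Usage sketch (added for illustration; the names and sizes below are assumptions,
 * not part of the generated file). The gaussian() entry point accepts either host
 * or device pointers -- it checks with cudaPointerGetAttributes and only copies
 * when handed host memory:
 *
 *   int N = 1024, M = 1024;
 *   double* h_in  = (double*) malloc(sizeof(double) * N * M);
 *   double* h_out = (double*) malloc(sizeof(double) * N * M);
 *   // ... fill h_in with N*M samples ...
 *   gaussian(h_in, N, M, h_out);
 *   free(h_in); free(h_out);
 */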
|
ee0c3d81ab981cd48af056cc5a5f2c26fba5e06b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
}
Layer::~Layer() {
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
// For now, gradReducer doesn't have a destructor
// delete _gradReducer;
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
hipStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) {
syncStream(); // Make sure I've finished computing before broadcasting
}
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation
// might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
//printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
// This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
// Synchronize if the previous layer is going to actually do a reduction.
// If the previous layer is on the same GPU as us and has no next layers
// on other GPUs then it won't need to do a reduction.
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
// NOTE: this should be here (before the bpropActs) because if you have a layer
// that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite
// v which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) {
syncStream();
}
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_prevDeviceIDs.size() + 1);
_prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
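// Worked example (added comment; the concrete numbers are illustrative): if this
// layer has 2 replicas, the smallest layer in the net has 1
// (getNumReplicasMin() == 1) and there are 2 input replicas, then
// getActivePassPeriod() == 2. Forward work is done on passIdx 0, 2, 4, ...
// (consuming input replica 0, 1, 0, ... in turn) and backward work on
// passIdx 1, 3, 5, ...; on all other passes these functions return -1 and the
// corresponding fprop/bprop message is effectively ignored.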
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
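// Worked example (added comment; numbers are illustrative): with 4 replicas of
// this layer, 2 replicas of the next layer and 8 of the previous one,
// getNumSiblingReplicas() == 4/2 == 2 (replicas feeding the same next-layer
// replica), getNumInputReplicas() == 8/4 == 2 (previous-layer replicas each of
// our replicas consumes), and replica ID 3 maps to getReplicaIdx() == 3 % 2 == 1
// within its sibling group.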
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
int numEpochs = pyDictGetInt(paramsDict, "num_epochs");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW, numEpochs); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB, numEpochs), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
// This will cause it to forget momentum when shown 0 training cases
// and _useGrad = false but it's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
// weight update period must be multiple of activation period
// TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
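// Worked example (added comment; numbers are illustrative): with a minibatch of
// 128 cases split into 4 passes and _weightUpdatePassPeriod == 4, numCases
// evaluates to 4 * (128 / 4) == 128, so the gradient is averaged over every case
// seen between weight updates. In gradient-check mode (PASS_GC) no 1/numCases
// scaling is applied.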
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
float scaleGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
_flipWeights = pyDictGetInt(paramsDict, "flipWeights");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
//NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
NVMatrix& tgt = doPartialSum ? _weightGradTmp : getWeightGrad(passType, inpIdx);
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
//_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
//_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
NVMatrix& tgt2 = getWeightGrad(passType, inpIdx);
tgt2.addSum(_weightGradTmp, 0, scaleTargets, 1);
tgt2.reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
setWeightGrad(passType, inpIdx, scaleTargets);
}
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
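// Sketch of the constraint above (added comment, hedged): for each filter column
// w of length fz the code forms w - mean(w), accumulates the sum of squares of
// that zero-mean filter into tmp, then applies
// WeightContrastNormOperator(wcNormMin, wcNormMax, 1/fz). Judging by the name and
// the inline comments, that operator turns the accumulated value into a per-filter
// rescaling factor derived from the filter's standard deviation, clamped to the
// configured range; the exact formula lives in layer_kernels.cuh and is not
// reproduced here.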
NVMatrix& ConvLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
if( !_flipWeights ) return _weights->at(inpIdx).getW();
_weights->at(inpIdx).getW().flipSquare(0,_weightsFlipped);
return _weightsFlipped;
}
NVMatrix& ConvLayer::getWeightGrad(PASS_TYPE passType, int inpIdx) {
if( !_flipWeights ) return _weights->at(inpIdx).getGrad();
_weights->at(inpIdx).getGrad().flipSquare(0,_weightsGradFlipped);
return _weightsGradFlipped;
}
void ConvLayer::setWeightGrad(PASS_TYPE passType, int inpIdx, float scaleTargets) {
if( !_flipWeights ) return;
_weightsGradFlipped.flipSquare(0,_weightsGradUnflipped);
_weights->at(inpIdx).getGrad().add(_weightsGradUnflipped, scaleTargets, 1);
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& input = *_inputs[0];
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
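// Added comment: the sequence above is the standard numerically stable softmax.
// For each row x it computes acts_i = exp(x_i - max_j x_j) / sum_k exp(x_k - max_j x_j);
// subtracting the row maximum before exponentiating avoids overflow without
// changing the result.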
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
// Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
    delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
    if (inpIdx == 1) { // Nothing is done for inpIdx == 0; the first pairwise max (inputs 0 and 1) is computed once the second input arrives
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
 * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
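// Added note: DropoutLayer above passes activations through unchanged when dropout
// is disabled or at test time, whereas Dropout2Layer below rescales test-time
// activations by _keep -- the classical dropout formulation in which training-time
// activations are not divided by the keep probability.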
/*
* =======================
* Dropout2Layer
* =======================
*
 * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID, false) {
}
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,hipStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(hipStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
hipStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(hipStreamCreateWithFlags(&_copyStreams[deviceID], hipStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
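// Added note: _memSrcActs and _memSrcActs2 act as a double buffer. The
// DataCopyThread fills the "other" buffer while the network consumes the current
// one, and fprop() flips _useBuffer once waitForCopyFinish() confirms the
// outstanding copy has completed.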
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
}
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
// Safer to divup because this way you won't get a minibatch size of 0
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
//_hostMemFwd.copyFromHost(replicaDataMatrix, true);
_hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true);
memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes());
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
_sum = pyDictGetInt(paramsDict, "sum");
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_sum) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<true>());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<false>());
}
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, _sum, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
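    // (Scale index s yields an (s+1) x (s+1) grid of crop positions, so
    //  numCrops = sum_{s=1}^{n} s^2 = n(n+1)(2n+1)/6 with n = numScales.)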
// For each scale, record the fraction of the squares that it has.
// This will be the probability of sampling this scale.
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
}
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // for now...
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler<false>());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
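// After the add above, _meanDiffs = images - (local mean), where the local mean comes from the
// _size x _size average pooling; convContrastNorm then normalizes each pixel by a function
// (governed by _scale and _pow) of these locally pooled differences, i.e. the local contrast.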
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.ice") {
return *new IndepCrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.normp") {
return *new NormPCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
computeCrossEntCost(labels, probs, _trueLabelLogProbs, _correctProbs);
_costv.clear();
_costv.push_back(-_trueLabelLogProbs.sum());
_costv.push_back(numCases - _correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
float m = _tmpVec.max();
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
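// Cost vector layout produced above: [0] = (weighted) binomial cross-entropy summed over all label
// entries; [1] = number of thresholded (p > 0.5) predictions disagreeing with the labels, normalized
// by the number of label dimensions; [2] (optional) = number of cases whose highest-probability
// output is not among the positive labels (ties counted fractionally).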
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
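// Per-class precision and recall can be recovered downstream from these triples (presumably on the
// Python side): precision = truePositive / declaredPositive, recall = truePositive / positive.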
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
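// During multiview testing the per-view probabilities are accumulated in _probsAccum and only
// averaged (and scored) when the final view of the pass arrives, so costs are pushed once per
// image rather than once per view.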
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
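// If the layer below is a softmax that feeds only this cost (same device, same replica count),
// bpropActs sets setDoUpperGrad(true) so the softmax computes the fused softmax + logreg gradient
// itself, avoiding a multiply followed by a divide by near-zero probabilities here.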
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax"
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_divideInput = pyDictGetFloat(paramsDict, "divideInput");
_alpha = pyDictGetFloat(paramsDict, "alpha"); // just used to scale cost, use coeff to scale gradients
}
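// Cost: _alpha * 0.5 * sum((x / _divideInput)^2); gradient: _coeff * x / _divideInput^2.
// _alpha only scales the reported cost, _coeff scales the gradients, and the sign follows the
// framework's bprop convention.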
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->scale(1.0/_divideInput, _tmp); _tmp.apply(NVMatrixOps::Square());
float cost = 0.5*_tmp.sum()*_alpha; _costv.clear(); _costv.push_back(cost);
//printf("cost %s %g\n",_name.c_str(),cost);
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -1.0 / _divideInput / _divideInput * _coeff );
}
/*
* =====================
* NormPCostLayer
* =====================
*/
NormPCostLayer::NormPCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_p = pyDictGetInt(paramsDict, "p");
_divideInput = pyDictGetFloat(paramsDict, "divideInput");
_alpha = pyDictGetFloat(paramsDict, "alpha"); // just used to scale cost, use coeff to scale gradients
}
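// Cost: _alpha * sum(|x/d|^p) / p with d = _divideInput; gradient: _coeff * sign(x/d) * |x/d|^(p-1) / d.
// The p = 1 and p = 2 cases are short-circuited below without the extra sign/power temporaries.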
void NormPCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->scale(1.0/_divideInput, _tmp);
if( _p==1 ) {
_tmp.apply(NVMatrixOps::Abs()); // L1 norm just abs
} else if( _p ==2 ) {
_tmp.apply(NVMatrixOps::Square()); // same as SumOfSquares
} else if( _p % 2 == 0) {
_tmp.apply(NVMatrixOps::Pow((float)_p)); // even, no abs needed
} else {
_tmp.apply(NVMatrixOps::PowAbs(_p));
}
float cost = _tmp.sum() / _p * _alpha; _costv.clear(); _costv.push_back(cost);
//printf("cost %s %g\n",_name.c_str(),cost);
}
void NormPCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// |x| derivative is approximately sign(x) (ignore zero)
if( _p == 1 ) {
_inputs[0]->apply(NVMatrixOps::Sign(), _tmpGrad1);
_prev[replicaIdx][inpIdx]->getActsGrad().add(_tmpGrad1, scaleTargets, 1.0 / _divideInput / _divideInput * _coeff );
} else if( _p ==2 ) {
// same as SumOfSquares, but specify sign with coeff
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, 1.0 / _divideInput / _divideInput * _coeff );
} else {
_inputs[0]->scale(1.0/_divideInput, _tmpGrad1); _tmpGrad1.apply(NVMatrixOps::Sign(), _tmpGrad2);
if( _p == 3 ) {
_tmpGrad1.apply(NVMatrixOps::Square());
} else if( _p % 2 == 1 ) {
_tmpGrad1.apply(NVMatrixOps::Pow((float)_p-1));
} else {
_tmpGrad1.apply(NVMatrixOps::PowAbs((float)_p-1));
}
_tmpGrad1.eltwiseMult(_tmpGrad2);
_prev[replicaIdx][inpIdx]->getActsGrad().add(_tmpGrad1, scaleTargets, 1.0 / _divideInput * _coeff );
}
}
/*
* =====================
* UnPoolLayer
* =====================
*/
UnPoolLayer::UnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
//_scale = pyDictGetFloat(paramsDict, "scale");
_scale = 1.0;
}
UnPoolLayer& UnPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxUnPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxUnPoolLayer(convNetThread, paramsDict, replicaID, true);
// xxx - not sure about this, also not using it
//} else if(_pool == "avg") {
// assert(0);
// return *new AvgUnPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown unpooling layer type ") + _pool;
}
/*
* =====================
* AvgUnPoolLayer
* =====================
*/
/*
AvgUnPoolLayer::AvgUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : UnPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void AvgUnPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
convLocalPool(*_inputs[0], _tmppool, _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
convLocalAvgUndo(_tmppool, getActs(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
void AvgUnPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
//convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
convLocalPool(v, _tmppool, _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
convLocalAvgUndo(_tmppool, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
*/
/*
* =====================
* MaxUnPoolLayer
* =====================
*/
MaxUnPoolLayer::MaxUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : UnPoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxUnPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
//convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
convLocalPool(*_inputs[0], _tmppool, _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
//convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
convLocalPool(*_inputs[0], _tmppool, _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
convLocalMaxUndo(*_inputs[0], _tmppool, _tmppool, getActs(), _sizeX, _start, _stride, _outputsX, scaleTargets, _scale);
}
//void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
//void convLocalMaxUnpool(NVMatrix& images, NVMatrix& grads, NVMatrix& maxActs, NVMatrix& target,
void MaxUnPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
//convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
convLocalMaxUnpool(*_inputs[0], v, _tmppool, _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, _scale);
}
/*
* =====================
* CrossMapUnPoolLayer
* =====================
*/
CrossMapUnPoolLayer::CrossMapUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapUnPoolLayer& CrossMapUnPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxUnPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown unpooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxUnPoolLayer
* =====================
*/
CrossMapMaxUnPoolLayer::CrossMapMaxUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapUnPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxUnPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
convPoolCrossMap(*_inputs[0], _tmppool, _start, _size, _outputs, _stride, _imgSize, MaxPooler());
convCrossMapMaxPoolUndo(*_inputs[0], _tmppool, _tmppool, getActs(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
void CrossMapMaxUnPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
//convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
convCrossMapMaxUnPool(*_inputs[0], v, _tmppool, _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =======================
* ConvDecoderLayer
* =======================
*/
ConvDecoderLayer::ConvDecoderLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ConvLayer(convNetThread, paramsDict, replicaID) {
_nPadFilters = pyDictGetInt(paramsDict, "nPadFilters");
_sumFeatures = pyDictGetInt(paramsDict, "sumFeatures"); // incorporates summing over feature maps (per channel)
}
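// The decoder convolves each input channel independently with its own group of _filterChannels
// filters. Because the convolution routines expect a fixed (larger) filter count, the per-channel
// weight (and gradient) slices are zero-padded into _nPadFilters-wide temporaries before every
// convFilterActs / convWeightActs / convImgActs call. When _sumFeatures is set, the per-channel
// results are accumulated into a single block of _filterChannels output maps instead of being
// laid out consecutively.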
void ConvDecoderLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
int nchans = _channels->at(inpIdx); int imgPixels = _imgPixels->at(inpIdx);
int fc = _filterChannels->at(inpIdx); int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
int fPixels = _filterPixels->at(inpIdx); //int padPixels = _nPadFilters*imgPixels;
assert(_numFilters==nchans*fc); // decoding same filters from previous with specified number of channels
_tmpWeights.resize(fPixels, _nPadFilters); _tmpWeights.zero(); NVMatrix& wSlcOut = _tmpWeights.sliceCols(0,fc);
NVMatrix &weightMatrix = getWeightMatrix(passType, inpIdx);
for( int i=0, j=0, k=0, m=0; i < nchans; i++, j+=imgPixels, k+=fc, m+=tgtStep ) {
NVMatrix& inpSlc = _inputs[inpIdx]->sliceRows(j,j+imgPixels);
// only want to convolve each channel with specified filters per channel, pad with zeros for required filler
NVMatrix& wSlcIn = weightMatrix.sliceCols(k,k+fc); wSlcIn.copy(wSlcOut);
convFilterActs(inpSlc, _tmpWeights, _tmpTarget, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), 1, _groups->at(inpIdx), 0, 1);
NVMatrix& tgtSlcIn = _tmpTarget.sliceRows(0,chanPixels);
NVMatrix& tgtSlcOut = getActs().sliceRows(m,m+chanPixels);
tgtSlcOut.add(tgtSlcIn, scaleTargets || (_sumFeatures && i>0), 1);
//syncStream(); // no other choice for this crappy method
delete &inpSlc; delete &wSlcIn; delete &tgtSlcIn; delete &tgtSlcOut;
}
delete &wSlcOut;
if (scaleTargets == 0) {
if (_sharedBiases) {
int nFilters = _sumFeatures ? fc : _numFilters;
getActs().reshape(nFilters, getActs().getNumElements() / nFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(nFilters * _modules, getActs().getNumElements() / (nFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvDecoderLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int fc = _filterChannels->at(0); //int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
int nFilters = _sumFeatures ? fc : _numFilters;
v.reshape(nFilters, v.getNumElements() / nFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(nFilters * _modules, v.getNumElements() / (nFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvDecoderLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
//NVMatrix& tgt = doPartialSum ? _weightGradTmp : getWeightGrad(passType, inpIdx);
NVMatrix& tgt = doPartialSum ? _tmpGradP : _tmpGrad;
int outWidth = DIVUP(_modulesX, _sumWidth);
//if( doPartialSum ) _tmpGrad.resize(_filterPixels->at(inpIdx), _nPadFilters);
float scaleWGrad = getGradScale(inpIdx, passType);
//float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
float scaleTargets = getIncScale(inpIdx, passType);
int nchans = _channels->at(inpIdx); int imgPixels = _imgPixels->at(inpIdx);
int fc = _filterChannels->at(inpIdx); int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
int fPixels = _filterPixels->at(inpIdx); int padPixels = _nPadFilters*imgPixels;
assert(_numFilters==nchans*fc); // decoding same filters from previous with specified number of channels
_tmpVbw.resize(padPixels, v.getNumCols()); _tmpVbw.zero(); NVMatrix& vSlcOut = _tmpVbw.sliceRows(0,chanPixels);
NVMatrix &gradMatrix = getWeightGrad(passType, inpIdx);
for( int i=0, j=0, k=0, m=0; i < nchans; i++, j+=imgPixels, k+=fc, m+=tgtStep ) {
NVMatrix& inpSlc = _inputs[inpIdx]->sliceRows(j,j+imgPixels);
// only want to convolve each channel with specified filters per channel, pad with zeros for required filler
NVMatrix& vSlcIn = v.sliceRows(m,m+chanPixels); vSlcIn.copy(vSlcOut);
convWeightActs(inpSlc, _tmpVbw, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), 1, _groups->at(inpIdx), _sumWidth, 0, scaleWGrad);
if (doPartialSum) {
_tmpGradP.reshape(outWidth*outWidth, fPixels * _nPadFilters);
_tmpGradP.sum(0, _tmpGrad);
_tmpGrad.reshape(fPixels, _nPadFilters);
}
NVMatrix& gSlcIn = _tmpGrad.sliceCols(0,fc);
NVMatrix& gSlcOut = gradMatrix.sliceCols(k,k+fc);
gSlcOut.add(gSlcIn, scaleTargets, 1);
//syncStream(); // no other choice for this crappy method
delete &inpSlc; delete &vSlcIn; delete &gSlcIn; delete &gSlcOut;
}
delete &vSlcOut;
setWeightGrad(passType, inpIdx, scaleTargets);
}
void ConvDecoderLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
int nchans = _channels->at(inpIdx); int imgPixels = _imgPixels->at(inpIdx);
int fc = _filterChannels->at(inpIdx); int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
int fPixels = _filterPixels->at(inpIdx); int padPixels = _nPadFilters*imgPixels;
assert(_numFilters==nchans*fc); // decoding same filters from previous with specified number of channels
_tmpWeights.resize(fPixels, _nPadFilters); _tmpWeights.zero(); NVMatrix& wSlcOut = _tmpWeights.sliceCols(0,fc);
_tmpVbp.resize(padPixels, v.getNumCols()); _tmpVbp.zero(); NVMatrix& vSlcOut = _tmpVbp.sliceRows(0,chanPixels);
NVMatrix &weightMatrix = getWeightMatrix(passType, inpIdx);
for( int i=0, j=0, k=0, m=0; i < nchans; i++, j+=imgPixels, k+=fc, m+=tgtStep ) {
// only want to convolve each channel with specified filters per channel, pad with zeros for required filler
NVMatrix& wSlcIn = weightMatrix.sliceCols(k,k+fc); wSlcIn.copy(wSlcOut);
NVMatrix& vSlcIn = v.sliceRows(m,m+chanPixels); vSlcIn.copy(vSlcOut);
convImgActs(_tmpVbp, _tmpWeights, _tmpTarget, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), 1, _groups->at(inpIdx), 0, 1);
NVMatrix& tgtSlcIn = _tmpTarget.sliceRows(0,imgPixels);
NVMatrix& tgtSlcOut = _prev[replicaIdx][inpIdx]->getActsGrad().sliceRows(j,j+imgPixels);
tgtSlcOut.add(tgtSlcIn, scaleTargets, 1);
//syncStream(); // no other choice for this crappy method
delete &wSlcIn; delete &vSlcIn; delete &tgtSlcIn; delete &tgtSlcOut;
}
delete &wSlcOut; delete &vSlcOut;
}
/*
* =======================
* SumLayer
* =======================
*/
SumLayer::SumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true) {
_stride = pyDictGetInt(paramsDict, "stride");
_noutputs = pyDictGetInt(paramsDict, "outputs");
}
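// Forward pass: the input's columns are treated as _stride consecutive groups of _noutputs columns,
// which are summed elementwise into _cursum; the backward pass replicates the incoming gradient
// into each group.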
void SumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
//int nInputs = _inputs[inpIdx]->getNumCols(), nCases = _inputs[inpIdx]->getNumRows();
NVMatrix& slc = _inputs[inpIdx]->sliceCols(0,_noutputs); _cursum.add(slc, 0, 1);
for( int i=_noutputs, j=1; j<_stride; i+=_noutputs, j++ ) {
        NVMatrix& slc2 = _inputs[inpIdx]->sliceCols(i,i+_noutputs); _cursum.add(slc2, 1, 1);
//syncStream(); // no other choice for this crappy method
delete &slc2;
}
delete &slc;
getActs().add(_cursum, scaleTargets, 1);
}
void SumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
//_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
NVMatrix &target = _prev[replicaIdx][inpIdx]->getActsGrad();
for( int i=0, j=0; j<_stride; i+=_noutputs, j++ ) {
NVMatrix& slc = target.sliceCols(i,i+_noutputs); slc.add(v, scaleTargets, 1);
//syncStream(); // no other choice for this crappy method
delete &slc;
}
}
/*
* =====================
* WhitenLayer
* =====================
*/
// needs to be a transpose layer because acting similar as a weight layer (using whitening matrix instead of weights)
WhitenLayer::WhitenLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true), TwoDLayerInterface(paramsDict) {
_hwhiten = pyDictGetMatrix(paramsDict, "whiten");
_whiten = NULL;
_wmean = pyDictGetFloat(paramsDict, "wmean");
_wstd = pyDictGetFloat(paramsDict, "wstd");
}
WhitenLayer::~WhitenLayer() {
delete _hwhiten;
}
void WhitenLayer::copyToGPU() {
_whiten.copyFromHost(*_hwhiten, true);
}
void WhitenLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
//convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
if( _channels == 1 ) {
getActs().addScalar(-_wmean); getActs().addProduct(*_inputs[inpIdx], _whiten, scaleTargets, 1/_wstd);
} else {
for( int i=0, j=0; i < _channels; i++, j+=_imgPixels ) {
NVMatrix& inpSlc = _inputs[inpIdx]->sliceRows(j,j+_imgPixels);
NVMatrix& tgtSlc = getActs().sliceRows(j,j+_imgPixels);
tgtSlc.addScalar(-_wmean); tgtSlc.addProduct(inpSlc, _whiten, scaleTargets, 1/_wstd);
            delete &inpSlc; delete &tgtSlc;
}
}
}
void WhitenLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
// OR?
//NVMatrix& tgt = _prev[replicaIdx][inpIdx]->getActsGrad();
//convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
//convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
if( _channels == 1 ) {
tgt.addProduct(v, _whiten.getTranspose(), scaleTargets, _wstd); tgt.addScalar(_wmean);
} else {
for( int i=0, j=0; i < _channels; i++, j+=_imgPixels ) {
NVMatrix& inpSlc = v.sliceRows(j,j+_imgPixels);
NVMatrix& tgtSlc = tgt.sliceRows(j,j+_imgPixels);
tgtSlc.addProduct(inpSlc, _whiten.getTranspose(), scaleTargets, _wstd); tgtSlc.addScalar(_wmean);
            delete &inpSlc; delete &tgtSlc;
}
}
}
/*
* =====================
* IndepCrossEntCostLayer
* =====================
*/
IndepCrossEntCostLayer::IndepCrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: CostLayer(convNetThread, paramsDict, replicaID, false) {
}
// see Bishop 6.8, and eq 6.163
void IndepCrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& inputs = *_inputs[1];
NVMatrix& probs = getActs();
//int numCases = probs.getLeadingDim();
//int numOut = probs.getFollowingDim();
// http://lingpipe-blog.com/2012/02/16/howprevent-overflow-underflow-logistic-regression/
inputs.apply(NVMatrixOps::Logistic(), probs); inputs.apply(NVMatrixOps::LogLogistic(), _logProbs);
computeIndepCrossEntCost(labels, probs, _logProbs, _logError, _correctProbs);
_costv.clear(); _costv.push_back(-_logError.sum()); _costv.push_back(_numCases - _correctProbs.sum());
}
}
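// Applying Logistic and LogLogistic as separate operators lets the log-probability be computed
// in a numerically stable form (presumably -log(1 + exp(-x))) rather than taking the log of an
// already squashed, possibly underflowed probability; see the link in fpropActs above.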
void IndepCrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets,
PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = getActs();
NVMatrix& target = _prev[replicaIdx][1]->getActsGrad();
    /* Could never resolve why this has to be, but the independent cost only works (for labels generated in C-order with
     * batches along the columns) if this layer is NOT a transpose layer. If set as a transpose layer, it still works
     * unless the activity matrix is square (noutputs == nminibatch). This worked fine in the old cuda-convnet and,
     * after much digging, I was unable to figure out why it doesn't work in cuda-convnet2. */
//labels.transpose(_trans); // this did not help
    // All other cost layers are not transpose layers, so the idea of supporting a transpose layer here was dropped;
    // it was a throwback to the original implementation of independent outputs in the first cuda-convnet anyway.
computeIndepCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
| ee0c3d81ab981cd48af056cc5a5f2c26fba5e06b.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
}
Layer::~Layer() {
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
    // (_gradReducer is stopped and deleted above when it was created.)
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
cudaStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) {
syncStream(); // Make sure I've finished computing before broadcasting
}
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation
// might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
//printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
// This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
// Synchronize if the previous layer is going to actually do a reduction.
// If the previous layer is on the same GPU as us and has no next layers
// on other GPUs then it won't need to do a reduction.
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
// NOTE: this should be here (before the bpropActs) because if you have a layer
// that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite
// v which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) {
syncStream();
}
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_prevDeviceIDs.size() + 1);
_prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
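// Replica scheduling: a layer with more replicas than the net-wide minimum is only active
// every getActivePassPeriod() passes. The forward pass uses input replica
// (passIdx / period) % numInputReplicas on period boundaries; the backward pass uses the
// same replica on the last pass of each period. Both functions return -1 on inactive passes.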
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
int numEpochs = pyDictGetInt(paramsDict, "num_epochs");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW, numEpochs); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB, numEpochs), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
// This will cause it to forget momentum when shown 0 training cases
// and _useGrad = false, but that's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
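// Gradient scaling: when a separate grad matrix is used (isUseGrad), the gradient computed
// here is only averaged over the cases seen between weight updates, and the learning rate
// is applied later (in the weight update); otherwise the learning rate is folded directly
// into the gradient scale. Gradient checking (PASS_GC) uses unscaled sums.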
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
// The weight update period must be a multiple of the activation period.
// TODO: simply accumulate the # of cases seen between weight updates. Simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
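// acts = scaleTargets * acts + input * W; the bias row is added only for the first input
// (when scaleTargets == 0).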
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
float scaleGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
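// Block-diagonal FC: the input columns, weight rows, and output columns are each split into
// _numParts groups, and part i of the output is computed only from part i of the input via
// a batched matrix multiply.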
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
_flipWeights = pyDictGetInt(paramsDict, "flipWeights");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
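// When _sumWidth < _modulesX, convWeightActs produces per-region partial sums in
// _weightGradTmp, which are reduced into the actual gradient target below.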
//NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
NVMatrix& tgt = doPartialSum ? _weightGradTmp : getWeightGrad(passType, inpIdx);
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
//_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
//_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
NVMatrix& tgt2 = getWeightGrad(passType, inpIdx);
tgt2.addSum(_weightGradTmp, 0, scaleTargets, 1);
tgt2.reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
setWeightGrad(passType, inpIdx, scaleTargets);
}
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
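// Weight flipping: when _flipWeights is set, fprop/bprop use flipped copies of the filters
// (flipSquare is assumed to flip each square filter block), and setWeightGrad() flips the
// accumulated gradient back before adding it into the stored gradient matrix.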
NVMatrix& ConvLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
if( !_flipWeights ) return _weights->at(inpIdx).getW();
_weights->at(inpIdx).getW().flipSquare(0,_weightsFlipped);
return _weightsFlipped;
}
NVMatrix& ConvLayer::getWeightGrad(PASS_TYPE passType, int inpIdx) {
if( !_flipWeights ) return _weights->at(inpIdx).getGrad();
_weights->at(inpIdx).getGrad().flipSquare(0,_weightsGradFlipped);
return _weightsGradFlipped;
}
void ConvLayer::setWeightGrad(PASS_TYPE passType, int inpIdx, float scaleTargets) {
if( !_flipWeights ) return;
_weightsGradFlipped.flipSquare(0,_weightsGradUnflipped);
_weights->at(inpIdx).getGrad().add(_weightsGradUnflipped, scaleTargets, 1);
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
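// Numerically stable softmax: subtract the per-case max before exponentiating, then
// normalize by the sum of the exponentials.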
NVMatrix& input = *_inputs[0];
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
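// When _doUpperGrad is set, the softmax gradient is fused with the logistic-regression cost
// gradient: it is computed directly from the cost layer's labels instead of multiplying by
// the softmax Jacobian.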
if (_doUpperGrad) {
// TODO: rethink replica IDs or indices... this doesn't make a huge amount of sense here
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (inpIdx == 1) { // Nothing to do for inpIdx == 0; the first max combines inputs 0 and 1
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keep mask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
/*
* =======================
* Dropout2Layer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keep mask.
*/
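// Dropout2Layer differs from DropoutLayer in the inactive (e.g. test-time) path: it scales
// activations and gradients by _keep instead of passing them through unchanged.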
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID, false) {
}
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,cudaStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(cudaStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
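// fromBuffer means the copier thread has already placed this pass's data in the inactive
// buffer; flipping _useBuffer makes that buffer the one returned by getActs().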
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
cudaStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(cudaStreamCreateWithFlags(&_copyStreams[deviceID], cudaStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
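// Double buffering: _memSrcActs and _memSrcActs2 alternate between the microbatch being
// consumed and the one being copied in; other == true selects whichever buffer is not
// currently active.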
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
}
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
// Safer to divup because this way you won't get a microbatch size of 0
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
//_hostMemFwd.copyFromHost(replicaDataMatrix, true);
_hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true);
memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes());
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
_sum = pyDictGetInt(paramsDict, "sum");
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
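// The _sum flag picks the pooler template parameter; judging by the name, AvgPooler<true>
// sums over each window rather than averaging (the bprop undo receives _sum as well).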
if (_sum) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<true>());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<false>());
}
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, _sum, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
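// i.e. numCrops = 1^2 + 2^2 + ... + numScales^2 (closed-form sum of squares).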
// For each scale, record the fraction of the squares that it has.
// This will be the probability of sampling this scale.
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
}
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // for now...
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler<false>());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.ice") {
return *new IndepCrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.normp") {
return *new NormPCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
computeCrossEntCost(labels, probs, _trueLabelLogProbs, _correctProbs);
_costv.clear();
_costv.push_back(-_trueLabelLogProbs.sum());
_costv.push_back(numCases - _correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
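/*
 * The numerical-stability note above, in symbols: with softmax outputs
 * p_i = exp(a_i) / sum_j exp(a_j) and cross-entropy cost C = -sum_i y_i * log(p_i),
 * the gradient with respect to the softmax inputs collapses (for labels summing to 1)
 * to dC/da_i = p_i - y_i. Letting the softmax layer apply that combined form avoids
 * dividing by a near-zero p_i here and multiplying by it again one layer down.
 */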
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
float m = _tmpVec.max();
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
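/*
 * For reference, assuming BinomialCrossEntOperator computes the usual positively-weighted
 * term (its definition is not shown here): the per-element quantity being summed is
 *   _posWeight * y * log(p) + (1 - y) * log(1 - p),
 * so -_tmpProbs.sum(_tmpbuf) above is the weighted binomial cross-entropy cost, and the
 * remaining _costv entries are the thresholded error counts described in the comments.
 */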
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
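/*
 * The three per-class counts pushed above are sufficient for the caller to recover the
 * usual detection metrics:
 *   precision = numTruePositive / numDeclaredPositive
 *   recall    = numTruePositive / numPositive
 */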
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax"
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_divideInput = pyDictGetFloat(paramsDict, "divideInput");
_alpha = pyDictGetFloat(paramsDict, "alpha"); // just used to scale cost, use coeff to scale gradients
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->scale(1.0/_divideInput, _tmp); _tmp.apply(NVMatrixOps::Square());
float cost = 0.5*_tmp.sum()*_alpha; _costv.clear(); _costv.push_back(cost);
//printf("cost %s %g\n",_name.c_str(),cost);
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -1.0 / _divideInput / _divideInput * _coeff );
}
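/*
 * In symbols: with z = x / _divideInput the reported cost is C = 0.5 * _alpha * sum(z^2),
 * and d(0.5 * sum(z^2))/dx = x / _divideInput^2, which is the factor applied in bpropActs.
 * As noted in the constructor, _alpha only scales the reported cost, while _coeff (together
 * with the framework's sign convention for cost gradients) scales the backpropagated term.
 */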
/*
* =====================
* NormPCostLayer
* =====================
*/
NormPCostLayer::NormPCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_p = pyDictGetInt(paramsDict, "p");
_divideInput = pyDictGetFloat(paramsDict, "divideInput");
_alpha = pyDictGetFloat(paramsDict, "alpha"); // just used to scale cost, use coeff to scale gradients
}
void NormPCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->scale(1.0/_divideInput, _tmp);
if( _p==1 ) {
_tmp.apply(NVMatrixOps::Abs()); // L1 norm just abs
} else if( _p ==2 ) {
_tmp.apply(NVMatrixOps::Square()); // same as SumOfSquares
} else if( _p % 2 == 0) {
_tmp.apply(NVMatrixOps::Pow((float)_p)); // even, no abs needed
} else {
_tmp.apply(NVMatrixOps::PowAbs(_p));
}
float cost = _tmp.sum() / _p * _alpha; _costv.clear(); _costv.push_back(cost);
//printf("cost %s %g\n",_name.c_str(),cost);
}
void NormPCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// |x| derivative is approximately sign(x) (ignore zero)
if( _p == 1 ) {
_inputs[0]->apply(NVMatrixOps::Sign(), _tmpGrad1);
_prev[replicaIdx][inpIdx]->getActsGrad().add(_tmpGrad1, scaleTargets, 1.0 / _divideInput / _divideInput * _coeff );
} else if( _p ==2 ) {
// same as SumOfSquares, but specify sign with coeff
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, 1.0 / _divideInput / _divideInput * _coeff );
} else {
_inputs[0]->scale(1.0/_divideInput, _tmpGrad1); _tmpGrad1.apply(NVMatrixOps::Sign(), _tmpGrad2);
if( _p == 3 ) {
_tmpGrad1.apply(NVMatrixOps::Square());
} else if( _p % 2 == 1 ) {
_tmpGrad1.apply(NVMatrixOps::Pow((float)_p-1));
} else {
_tmpGrad1.apply(NVMatrixOps::PowAbs((float)_p-1));
}
_tmpGrad1.eltwiseMult(_tmpGrad2);
_prev[replicaIdx][inpIdx]->getActsGrad().add(_tmpGrad1, scaleTargets, 1.0 / _divideInput * _coeff );
}
}
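/*
 * In symbols: with z = x / _divideInput the reported cost is C = (_alpha / p) * sum(|z|^p),
 * and d((1/p)|z|^p)/dx = sign(z) * |z|^(p-1) / _divideInput. The branches above build that
 * quantity case by case: sign(x) for p == 1, z itself for p == 2, and sign(z) * |z|^(p-1)
 * (via Pow or PowAbs) for higher p.
 */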
/*
* =====================
* UnPoolLayer
* =====================
*/
UnPoolLayer::UnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
//_scale = pyDictGetFloat(paramsDict, "scale");
_scale = 1.0;
}
UnPoolLayer& UnPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxUnPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxUnPoolLayer(convNetThread, paramsDict, replicaID, true);
// xxx - not sure about this, also not using it
//} else if(_pool == "avg") {
// assert(0);
// return *new AvgUnPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown unpooling layer type ") + _pool;
}
/*
* =====================
* AvgUnPoolLayer
* =====================
*/
/*
AvgUnPoolLayer::AvgUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : UnPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void AvgUnPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
convLocalPool(*_inputs[0], _tmppool, _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
convLocalAvgUndo(_tmppool, getActs(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
void AvgUnPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
//convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
convLocalPool(v, _tmppool, _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
convLocalAvgUndo(_tmppool, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
*/
/*
* =====================
* MaxUnPoolLayer
* =====================
*/
MaxUnPoolLayer::MaxUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : UnPoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxUnPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
//convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
convLocalPool(*_inputs[0], _tmppool, _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
//convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
convLocalPool(*_inputs[0], _tmppool, _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
convLocalMaxUndo(*_inputs[0], _tmppool, _tmppool, getActs(), _sizeX, _start, _stride, _outputsX, scaleTargets, _scale);
}
//void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target,
//void convLocalMaxUnpool(NVMatrix& images, NVMatrix& grads, NVMatrix& maxActs, NVMatrix& target,
void MaxUnPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
//convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
convLocalMaxUnpool(*_inputs[0], v, _tmppool, _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, _scale);
}
/*
* =====================
* CrossMapUnPoolLayer
* =====================
*/
CrossMapUnPoolLayer::CrossMapUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapUnPoolLayer& CrossMapUnPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxUnPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown unpooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxUnPoolLayer
* =====================
*/
CrossMapMaxUnPoolLayer::CrossMapMaxUnPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapUnPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxUnPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
convPoolCrossMap(*_inputs[0], _tmppool, _start, _size, _outputs, _stride, _imgSize, MaxPooler());
convCrossMapMaxPoolUndo(*_inputs[0], _tmppool, _tmppool, getActs(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
void CrossMapMaxUnPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
//convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
convCrossMapMaxUnPool(*_inputs[0], v, _tmppool, _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =======================
* ConvDecoderLayer
* =======================
*/
ConvDecoderLayer::ConvDecoderLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ConvLayer(convNetThread, paramsDict, replicaID) {
_nPadFilters = pyDictGetInt(paramsDict, "nPadFilters");
_sumFeatures = pyDictGetInt(paramsDict, "sumFeatures"); // incorporates summing over feature maps (per channel)
}
void ConvDecoderLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
int nchans = _channels->at(inpIdx); int imgPixels = _imgPixels->at(inpIdx);
int fc = _filterChannels->at(inpIdx); int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
int fPixels = _filterPixels->at(inpIdx); //int padPixels = _nPadFilters*imgPixels;
assert(_numFilters==nchans*fc); // decoding same filters from previous with specified number of channels
_tmpWeights.resize(fPixels, _nPadFilters); _tmpWeights.zero(); NVMatrix& wSlcOut = _tmpWeights.sliceCols(0,fc);
NVMatrix &weightMatrix = getWeightMatrix(passType, inpIdx);
for( int i=0, j=0, k=0, m=0; i < nchans; i++, j+=imgPixels, k+=fc, m+=tgtStep ) {
NVMatrix& inpSlc = _inputs[inpIdx]->sliceRows(j,j+imgPixels);
// only want to convolve each channel with specified filters per channel, pad with zeros for required filler
NVMatrix& wSlcIn = weightMatrix.sliceCols(k,k+fc); wSlcIn.copy(wSlcOut);
convFilterActs(inpSlc, _tmpWeights, _tmpTarget, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), 1, _groups->at(inpIdx), 0, 1);
NVMatrix& tgtSlcIn = _tmpTarget.sliceRows(0,chanPixels);
NVMatrix& tgtSlcOut = getActs().sliceRows(m,m+chanPixels);
tgtSlcOut.add(tgtSlcIn, scaleTargets || (_sumFeatures && i>0), 1);
//syncStream(); // no other choice for this crappy method
delete &inpSlc; delete &wSlcIn; delete &tgtSlcIn; delete &tgtSlcOut;
}
delete &wSlcOut;
if (scaleTargets == 0) {
if (_sharedBiases) {
int nFilters = _sumFeatures ? fc : _numFilters;
getActs().reshape(nFilters, getActs().getNumElements() / nFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(nFilters * _modules, getActs().getNumElements() / (nFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
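/*
 * Loop bookkeeping in fpropActs above: i walks the nchans input channels, j is the row
 * offset of channel i inside the input (imgPixels rows per channel), k is the column
 * offset of that channel's fc private filters inside the weight matrix, and m is the row
 * offset of the output block (held at 0 when _sumFeatures accumulates every channel into
 * the same block). Each channel is convolved against a zero-padded, _nPadFilters-wide
 * copy of its own filters so that convFilterActs always sees a fixed filter count.
 */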
void ConvDecoderLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int fc = _filterChannels->at(0); //int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
int nFilters = _sumFeatures ? fc : _numFilters;
v.reshape(nFilters, v.getNumElements() / nFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(nFilters * _modules, v.getNumElements() / (nFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvDecoderLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
//NVMatrix& tgt = doPartialSum ? _weightGradTmp : getWeightGrad(passType, inpIdx);
NVMatrix& tgt = doPartialSum ? _tmpGradP : _tmpGrad;
int outWidth = DIVUP(_modulesX, _sumWidth);
//if( doPartialSum ) _tmpGrad.resize(_filterPixels->at(inpIdx), _nPadFilters);
float scaleWGrad = getGradScale(inpIdx, passType);
//float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
float scaleTargets = getIncScale(inpIdx, passType);
int nchans = _channels->at(inpIdx); int imgPixels = _imgPixels->at(inpIdx);
int fc = _filterChannels->at(inpIdx); int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
int fPixels = _filterPixels->at(inpIdx); int padPixels = _nPadFilters*imgPixels;
assert(_numFilters==nchans*fc); // decoding same filters from previous with specified number of channels
_tmpVbw.resize(padPixels, v.getNumCols()); _tmpVbw.zero(); NVMatrix& vSlcOut = _tmpVbw.sliceRows(0,chanPixels);
NVMatrix &gradMatrix = getWeightGrad(passType, inpIdx);
for( int i=0, j=0, k=0, m=0; i < nchans; i++, j+=imgPixels, k+=fc, m+=tgtStep ) {
NVMatrix& inpSlc = _inputs[inpIdx]->sliceRows(j,j+imgPixels);
// only want to convolve each channel with specified filters per channel, pad with zeros for required filler
NVMatrix& vSlcIn = v.sliceRows(m,m+chanPixels); vSlcIn.copy(vSlcOut);
convWeightActs(inpSlc, _tmpVbw, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), 1, _groups->at(inpIdx), _sumWidth, 0, scaleWGrad);
if (doPartialSum) {
_tmpGradP.reshape(outWidth*outWidth, fPixels * _nPadFilters);
_tmpGradP.sum(0, _tmpGrad);
_tmpGrad.reshape(fPixels, _nPadFilters);
}
NVMatrix& gSlcIn = _tmpGrad.sliceCols(0,fc);
NVMatrix& gSlcOut = gradMatrix.sliceCols(k,k+fc);
gSlcOut.add(gSlcIn, scaleTargets, 1);
//syncStream(); // no other choice for this crappy method
delete &inpSlc; delete &vSlcIn; delete &gSlcIn; delete &gSlcOut;
}
delete &vSlcOut;
setWeightGrad(passType, inpIdx, scaleTargets);
}
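/*
 * Partial-sum bookkeeping above: when _sumWidth < _modulesX, convWeightActs emits one
 * partial weight gradient per _sumWidth x _sumWidth block of modules. Reshaping _tmpGradP
 * to outWidth*outWidth rows and summing over axis 0 collapses those blocks into a single
 * (fPixels x _nPadFilters) gradient, whose first fc columns are then accumulated into this
 * channel's slice of the real weight gradient.
 */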
void ConvDecoderLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
int nchans = _channels->at(inpIdx); int imgPixels = _imgPixels->at(inpIdx);
int fc = _filterChannels->at(inpIdx); int chanPixels = fc*imgPixels; int tgtStep = _sumFeatures ? 0 : chanPixels;
int fPixels = _filterPixels->at(inpIdx); int padPixels = _nPadFilters*imgPixels;
assert(_numFilters==nchans*fc); // decoding same filters from previous with specified number of channels
_tmpWeights.resize(fPixels, _nPadFilters); _tmpWeights.zero(); NVMatrix& wSlcOut = _tmpWeights.sliceCols(0,fc);
_tmpVbp.resize(padPixels, v.getNumCols()); _tmpVbp.zero(); NVMatrix& vSlcOut = _tmpVbp.sliceRows(0,chanPixels);
NVMatrix &weightMatrix = getWeightMatrix(passType, inpIdx);
for( int i=0, j=0, k=0, m=0; i < nchans; i++, j+=imgPixels, k+=fc, m+=tgtStep ) {
// only want to convolve each channel with specified filters per channel, pad with zeros for required filler
NVMatrix& wSlcIn = weightMatrix.sliceCols(k,k+fc); wSlcIn.copy(wSlcOut);
NVMatrix& vSlcIn = v.sliceRows(m,m+chanPixels); vSlcIn.copy(vSlcOut);
convImgActs(_tmpVbp, _tmpWeights, _tmpTarget, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), 1, _groups->at(inpIdx), 0, 1);
NVMatrix& tgtSlcIn = _tmpTarget.sliceRows(0,imgPixels);
NVMatrix& tgtSlcOut = _prev[replicaIdx][inpIdx]->getActsGrad().sliceRows(j,j+imgPixels);
tgtSlcOut.add(tgtSlcIn, scaleTargets, 1);
//syncStream(); // no other choice for this crappy method
delete &wSlcIn; delete &vSlcIn; delete &tgtSlcIn; delete &tgtSlcOut;
}
delete &wSlcOut; delete &vSlcOut;
}
/*
* =======================
* SumLayer
* =======================
*/
SumLayer::SumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true) {
_stride = pyDictGetInt(paramsDict, "stride");
_noutputs = pyDictGetInt(paramsDict, "outputs");
}
void SumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
//int nInputs = _inputs[inpIdx]->getNumCols(), nCases = _inputs[inpIdx]->getNumRows();
NVMatrix& slc = _inputs[inpIdx]->sliceCols(0,_noutputs); _cursum.add(slc, 0, 1);
for( int i=_noutputs, j=1; j<_stride; i+=_noutputs, j++ ) {
NVMatrix& slc2 = _inputs[inpIdx]->sliceCols(i,i+_noutputs); _cursum.add(slc2, 1, 1);
//syncStream(); // no other choice for this crappy method
delete &slc2;
}
delete &slc;
getActs().add(_cursum, scaleTargets, 1);
}
void SumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
//_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
NVMatrix &target = _prev[replicaIdx][inpIdx]->getActsGrad();
for( int i=0, j=0; j<_stride; i+=_noutputs, j++ ) {
NVMatrix& slc = target.sliceCols(i,i+_noutputs); slc.add(v, scaleTargets, 1);
//syncStream(); // no other choice for this crappy method
delete &slc;
}
}
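/*
 * In effect this layer folds an input of _stride * _noutputs columns down to _noutputs
 * columns by summing consecutive _noutputs-wide blocks, and bpropActs simply adds the
 * incoming gradient v back into every one of those blocks.
 */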
/*
* =====================
* WhitenLayer
* =====================
*/
// needs to be a transpose layer because it acts much like a weight layer (using the whitening matrix instead of weights)
WhitenLayer::WhitenLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true), TwoDLayerInterface(paramsDict) {
_hwhiten = pyDictGetMatrix(paramsDict, "whiten");
_whiten = NULL;
_wmean = pyDictGetFloat(paramsDict, "wmean");
_wstd = pyDictGetFloat(paramsDict, "wstd");
}
WhitenLayer::~WhitenLayer() {
delete _hwhiten;
}
void WhitenLayer::copyToGPU() {
_whiten.copyFromHost(*_hwhiten, true);
}
void WhitenLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
//convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
if( _channels == 1 ) {
getActs().addScalar(-_wmean); getActs().addProduct(*_inputs[inpIdx], _whiten, scaleTargets, 1/_wstd);
} else {
for( int i=0, j=0; i < _channels; i++, j+=_imgPixels ) {
NVMatrix& inpSlc = _inputs[inpIdx]->sliceRows(j,j+_imgPixels);
NVMatrix& tgtSlc = getActs().sliceRows(j,j+_imgPixels);
tgtSlc.addScalar(-_wmean); tgtSlc.addProduct(inpSlc, _whiten, scaleTargets, 1/_wstd);
delete &inpSlc; delete &tgtSlc;
}
}
}
void WhitenLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
// OR?
//NVMatrix& tgt = _prev[replicaIdx][inpIdx]->getActsGrad();
//convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
//convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
if( _channels == 1 ) {
tgt.addProduct(v, _whiten.getTranspose(), scaleTargets, _wstd); tgt.addScalar(_wmean);
} else {
for( int i=0, j=0; i < _channels; i++, j+=_imgPixels ) {
NVMatrix& inpSlc = v.sliceRows(j,j+_imgPixels);
NVMatrix& tgtSlc = tgt.sliceRows(j,j+_imgPixels);
tgtSlc.addProduct(inpSlc, _whiten.getTranspose(), scaleTargets, _wstd); tgtSlc.addScalar(_wmean);
delete &inpSlc; delete &tgtSlc;
}
}
}
/*
* =====================
* IndepCrossEntCostLayer
* =====================
*/
IndepCrossEntCostLayer::IndepCrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: CostLayer(convNetThread, paramsDict, replicaID, false) {
}
// see Bishop 6.8, and eq 6.163
void IndepCrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& inputs = *_inputs[1];
NVMatrix& probs = getActs();
//int numCases = probs.getLeadingDim();
//int numOut = probs.getFollowingDim();
// http://lingpipe-blog.com/2012/02/16/howprevent-overflow-underflow-logistic-regression/
inputs.apply(NVMatrixOps::Logistic(), probs); inputs.apply(NVMatrixOps::LogLogistic(), _logProbs);
computeIndepCrossEntCost(labels, probs, _logProbs, _logError, _correctProbs);
_costv.clear(); _costv.push_back(-_logError.sum()); _costv.push_back(_numCases - _correctProbs.sum());
}
}
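/*
 * The cost referenced above (Bishop 6.8), in this code's variables: with p = logistic(a)
 * applied elementwise to the inputs, the independent cross-entropy is
 *   E = -sum_k [ y_k * log(p_k) + (1 - y_k) * log(1 - p_k) ],
 * and its gradient with respect to the pre-logistic inputs a reduces to p - y (scaled by
 * _coeff), which is presumably what computeIndepCrossEntGrad computes in bpropActs below.
 */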
void IndepCrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets,
PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = getActs();
NVMatrix& target = _prev[replicaIdx][1]->getActsGrad();
/* Could never resolve why this has to be, but the independent cost only works (for labels generated in C-order with
 * batches along the columns) if this layer is NOT a transpose layer. If set as a transpose layer, it still works
 * unless the activity matrix is square (noutputs == nminibatch). This worked fine in the old cuda-convnet, and
 * after much digging I was unable to figure out why it doesn't work in cuda-convnet2. */
//labels.transpose(_trans); // this did not help
// All other cost layers are not transpose layers, so the idea of supporting a transpose layer here was dropped;
// it was a throwback to the original implementation of independent outputs in the first cuda-convnet anyway.
computeIndepCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
|
230ed54e087ba19e5433ea002f210422d0fce172.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define CHECK(call) { const hipError_t error = call; if (error != hipSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); exit(1); }}
__global__ void default_name (float *default_float,int default_int) {
int k=threadIdx.x + blockDim.x * blockIdx.x;
} // End of Global
int main ()
{
int devCount,blocks,threads;
/* Placeholder declarations for the variables referenced below; names come from the code
   in main() and types are inferred from how each is used (sscanf %i, malloc casts,
   memcpy sizes, fprintf %f). */
int x, y, z, k, points;
int *default_array;
float *top_sum, *bottom_sum, *reso;
float *covariance, *dev_covariance, *dev_variance;
int *int_arrays;
float *float_arrays;
int *dev_int_arrays;
float *dev_float_arrays;
char buf[256];
FILE* file=fopen("default.dat","r");
FILE *ofp;
char outputFilename[] = "default.out";
CHECK (hipSetDevice ( 0 ) );
//Read a File
while (fgets(buf, sizeof (buf), file)) {
sscanf (buf, "%i\t%i\t%i",&x,&y,&z);
}
fclose (file);
//Allocate Local Array
default_array=(int *)malloc(SOMESIZE*sizeof(int));
if(default_array == NULL){
printf("Error: %s:%d, ", __FILE__, __LINE__);
exit(1);}
memset(default_array,0,SOMESIZE*sizeof(int));
//Write a File
ofp=fopen(outputFilename, "w");
for (k=0;k<points;k++){
fprintf(ofp,"%f\n",top_sum[k]/bottom_sum[k]);
}
fclose(ofp);
hipGetDeviceCount(&devCount);
//printf("CUDA Device Query...\n");
//printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i){
// Get device properties
//printf("CUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
//printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
threads=devProp.maxThreadsPerBlock;
}
blocks=ceil(float(SOMENUMBER)/float(threads))+1;
printf("Threads=%i\n",threads);
printf("Blocks=%i\n",blocks);
//Allocate on Device and Launch and Copy Back
CHECK (hipMalloc((void **) &dev_covariance, (points*points)*sizeof(float)) );
CHECK (hipMemcpy(dev_covariance, covariance, (points*points)*sizeof(float), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(compute_covariance, dim3(blocks), dim3(threads), 0, 0, dev_variance, dev_covariance, points);
CHECK (hipMemcpy(covariance, dev_covariance, (points*points)*sizeof(float), hipMemcpyDeviceToHost) );
CHECK (hipFree(dev_covariance) );
CHECK (hipFree(dev_variance) );
hipDeviceReset();
//Free Allocated Arrays
free(reso);
free(top_sum);
printf("Complete!\n");
return 0;
}
| 230ed54e087ba19e5433ea002f210422d0fce172.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define CHECK(call) { const cudaError_t error = call; if (error != cudaSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); exit(1); }}
__global__ void default_name (float *default_float,int default_int) {
int k=threadIdx.x + blockDim.x * blockIdx.x;
} // End of Global
int main ()
{
int devCount,blocks,threads;
/* Placeholder declarations for the variables referenced below; names come from the code
   in main() and types are inferred from how each is used (sscanf %i, malloc casts,
   memcpy sizes, fprintf %f). */
int x, y, z, k, points;
int *default_array;
float *top_sum, *bottom_sum, *reso;
float *covariance, *dev_covariance, *dev_variance;
int *int_arrays;
float *float_arrays;
int *dev_int_arrays;
float *dev_float_arrays;
char buf[256];
FILE* file=fopen("default.dat","r");
FILE *ofp;
char outputFilename[] = "default.out";
CHECK (cudaSetDevice ( 0 ) );
//Read a File
while (fgets(buf, sizeof (buf), file)) {
sscanf (buf, "%i\t%i\t%i",&x,&y,&z);
}
fclose (file);
//Allocate Local Array
default_array=(int *)malloc(SOMESIZE*sizeof(int));
if(default_array == NULL){
printf("Error: %s:%d, ", __FILE__, __LINE__);
exit(1);}
memset(default_array,0,SOMESIZE*sizeof(int));
//Write a File
ofp=fopen(outputFilename, "w");
for (k=0;k<points;k++){
fprintf(ofp,"%f\n",top_sum[k]/bottom_sum[k]);
}
fclose(ofp);
cudaGetDeviceCount(&devCount);
//printf("CUDA Device Query...\n");
//printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
for (int i = 0; i < devCount; ++i){
// Get device properties
//printf("CUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
//printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
threads=devProp.maxThreadsPerBlock;
}
blocks=ceil(float(SOMENUMBER)/float(threads))+1;
printf("Threads=%i\n",threads);
printf("Blocks=%i\n",blocks);
//Allocate on Device and Launch and Copy Back
CHECK (cudaMalloc((void **) &dev_covariance, (points*points)*sizeof(float)) );
CHECK (cudaMemcpy(dev_covariance, covariance, (points*points)*sizeof(float), cudaMemcpyHostToDevice) );
compute_covariance<<<blocks,threads>>>(dev_variance,dev_covariance,points);
CHECK (cudaMemcpy(covariance, dev_covariance, (points*points)*sizeof(float), cudaMemcpyDeviceToHost) );
CHECK (cudaFree(dev_covariance) );
CHECK (cudaFree(dev_variance) );
cudaDeviceReset();
//Free Allocated Arrays
free(reso);
free(top_sum);
printf("Complete!\n");
return 0;
}
|
fb3fdfa61e6cc41d4552f42dce0a6fdd090a7db2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <hip/hip_runtime_api.h>
/******************************************************************************
The variable names and the function names of this program are the same as those provided by the university.
The added variable and function are the only changes made to this program.
Compile with:
nvcc -o Imageprocessing_cuda Imageprocessing_cuda.cu -lglut -lGL
./Imageprocessing_cuda > results.txt
******************************************************************************/
#define width 100
#define height 72
unsigned char results[width * height];
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,255,255,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,255,255,255,255,0,255,255,0,0,0,0,255,255,255,255,255,
0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,255,255,255,0,255,0,0,0,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
0,0,0,0,0,255,255,255,0,0,0,0,0,0,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,
0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,0,0,0,0,0,255,255,0,0,0,0,0,0,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,
255,255,0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,0,255,0,0,0,0,0,0,255,0,0,
0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,0,0,
0,0,0,255,255,255,255,0,0,0,0,0,0,0,255,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
255,255,255,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,
255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,
255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,0,
0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255,
255,255,255,255,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,
0,0,0,0,255,255,255,255,0,0,0,0,0,0,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,
255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,255,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,0,
0,0,0,0,0,0,255,0,0,0,0,0,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,0,
0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,0,0,0,0,
0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,0,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
__global__ void detect_edges(unsigned char *in, unsigned char *out) {
int i = (blockIdx.x * 72) + threadIdx.x;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of the calculation
y = i / width;
x = i - (width * y);
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
out[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1)
+ (in[h] * -1);
if (r > 0) { // if the result is positive this is an edge pixel
out[i] = 255;
} else {
out[i] = 0;
}
}
}
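/* Illustrative sketch, not part of the original program and not called by it:
the same decision detect_edges makes, written for a single pixel on the host.
The function name edge_at and the parameter names px, py are hypothetical. */
static int edge_at(const unsigned char *in, int px, int py) {
if (px == 0 || py == 0 || px == width - 1 || py == height - 1) {
return 0; // border pixels are never marked as edges
}
int i = py * width + px;
// 4-neighbour Laplacian: 4 * centre minus the pixels above, below, left and right
int r = 4 * in[i] - in[i + width] - in[i - width] - in[i - 1] - in[i + 1];
return r > 0 ? 255 : 0; // a positive response is treated as an edge
}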
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27:
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
hipMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
hipMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
hipMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), hipMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( detect_edges), dim3(100),dim3(72), 0, 0, d_image, d_results);
hipDeviceSynchronize();
hipMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
hipFree(d_image); // free the device buffers themselves, not the addresses of the host pointers
hipFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("6CS005 Image Processing Coursework");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
| fb3fdfa61e6cc41d4552f42dce0a6fdd090a7db2.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#include <cuda_runtime_api.h>
/******************************************************************************
The variable names and the function names of this program are the same as those provided by the university.
The added variable and function are the only changes made to this program.
Compile with:
nvcc -o Imageprocessing_cuda Imageprocessing_cuda.cu -lglut -lGL
./Imageprocessing_cuda > results.txt
******************************************************************************/
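/* A corresponding build of the hipified counterpart might look like the following;
the hipcc flags and output name are assumptions, not taken from the original:
hipcc -o Imageprocessing_hip Imageprocessing_hip.cu -lglut -lGL
./Imageprocessing_hip > results.txt */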
#define width 100
#define height 72
unsigned char results[width * height];
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,255,255,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,255,255,255,255,255,255,0,255,255,0,0,0,0,255,255,255,255,255,
0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,255,255,255,0,255,0,0,0,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
0,0,0,0,0,255,255,255,0,0,0,0,0,0,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,
0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,0,0,0,0,0,255,255,0,0,0,0,0,0,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,
255,255,0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,0,255,0,0,0,0,0,0,255,0,0,
0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,0,0,
0,0,0,255,255,255,255,0,0,0,0,0,0,0,255,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
255,255,255,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,
255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,
0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,
255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,0,
0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255,
255,255,255,255,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,
0,0,0,0,255,255,255,255,0,0,0,0,0,0,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,
255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,255,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,0,
0,0,0,0,0,0,255,0,0,0,0,0,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,0,
0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,0,0,0,0,
0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,255,255,0,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
__global__ void detect_edges(unsigned char *in, unsigned char *out) {
int i = (blockIdx.x * 72) + threadIdx.x;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of the calculation
y = i / width;
x = i - (width * y);
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
out[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1)
+ (in[h] * -1);
if (r > 0) { // if the result is positive this is an edge pixel
out[i] = 255;
} else {
out[i] = 0;
}
}
}
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27:
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
cudaMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
cudaMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
cudaMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), cudaMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
detect_edges<<<100,72>>>(d_image, d_results);
cudaThreadSynchronize();
cudaMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
cudaFree(d_image); // free the device buffers themselves, not the addresses of the host pointers
cudaFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("6CS005 Image Processing Coursework");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
|
dd42de1df8fc5285c0244cb2616eeeb42f3c12ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/for_each.hpp>
#define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
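// Helper used by the tests below: submit the cudaFlow onto a dedicated stream and
// block until everything captured in it has finished executing.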
template <typename T>
void run_and_wait(T& cf) {
tf::cudaStream stream;
cf.run(stream);
stream.synchronize();
}
// Each point (thread) computes its distance to each centroid
// and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(
const float* px,
const float* py,
int N,
const float* mx,
const float* my,
float* sx,
float* sy,
int k,
int* c
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N) {
return;
}
// Make global loads once.
const float x = px[index];
const float y = py[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance = L2(x, y, mx[cluster], my[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(&sx[best_cluster], x);
atomicAdd(&sy[best_cluster], y);
atomicAdd(&c [best_cluster], 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(
float* mx, float* my, const float* sx, const float* sy, const int* c
) {
const int cluster = threadIdx.x;
const int count = max(1, c[cluster]); // turn 0/0 to 0/1
mx[cluster] = sx[cluster] / count;
my[cluster] = sy[cluster] / count;
}
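// In symbols: with S_k = (sx[k], sy[k]) the accumulated sum of the points assigned to
// cluster k and c_k the number of points assigned to it, the update above is
//   m_k = S_k / max(1, c_k)
// The max(1, .) guard turns the 0/0 of an empty cluster into 0/1 = 0 instead of NaN.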
// k-means clustering
void kmeans(int N, int K, int M, size_t num_cpus, size_t num_gpus) {
std::vector<float> h_px, h_py, h_mx, h_my, mx, my;
std::vector<int> c(K), best_ks(N);
std::vector<float> sx(K), sy(K);
float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy, *d_c;
// Randomly generate N points
for(int i=0; i<N; ++i) {
h_px.push_back(rand()%1000 - 500);
h_py.push_back(rand()%1000 - 500);
if(i < K) {
mx.push_back(h_px.back());
my.push_back(h_py.back());
h_mx.push_back(h_px.back());
h_my.push_back(h_py.back());
}
}
tf::Taskflow taskflow;
tf::Executor executor(num_cpus + num_gpus);
// cpu version
auto init = taskflow.emplace([&](){
for(int i=0; i<K; ++i) {
mx[i] = h_px[i];
my[i] = h_py[i];
}
}).name("init");
// clear the storage
auto clean_up = taskflow.emplace([&](){
for(int k=0; k<K; ++k) {
sx[k] = 0.0f;
sy[k] = 0.0f;
c [k] = 0;
}
}).name("clean_up");
tf::Task pf;
// update cluster
pf = taskflow.for_each_index(0, N, 1, [&](int i){
float x = h_px[i];
float y = h_py[i];
float best_d = std::numeric_limits<float>::max();
int best_k = 0;
for (int k = 0; k < K; ++k) {
const float d = L2(x, y, mx[k], my[k]);
if (d < best_d) {
best_d = d;
best_k = k;
}
}
best_ks[i] = best_k;
});
auto update_cluster = taskflow.emplace([&](){
for(int i=0; i<N; i++) {
sx[best_ks[i]] += h_px[i];
sy[best_ks[i]] += h_py[i];
c [best_ks[i]] += 1;
}
for(int k=0; k<K; ++k) {
auto count = max(1, c[k]); // turn 0/0 to 0/1
mx[k] = sx[k] / count;
my[k] = sy[k] / count;
}
}).name("update_cluster");
auto condition = taskflow.emplace([m=0, M]() mutable {
return (m++ < M) ? 0 : 1;
}).name("converged?");
init.precede(clean_up);
clean_up.precede(pf);
pf.precede(update_cluster);
condition.precede(clean_up)
.succeed(update_cluster);
// gpu version
auto allocate_px = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_px, N*sizeof(float)) == hipSuccess);
}).name("allocate_px");
auto allocate_py = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_py, N*sizeof(float)) == hipSuccess);
}).name("allocate_py");
auto allocate_mx = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_mx, K*sizeof(float)) == hipSuccess);
}).name("allocate_mx");
auto allocate_my = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_my, K*sizeof(float)) == hipSuccess);
}).name("allocate_my");
auto allocate_sx = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_sx, K*sizeof(float)) == hipSuccess);
}).name("allocate_sx");
auto allocate_sy = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_sy, K*sizeof(float)) == hipSuccess);
}).name("allocate_sy");
auto allocate_c = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_c, K*sizeof(float)) == hipSuccess);
}).name("allocate_c");
auto h2d = taskflow.emplace([&](){
tf::cudaFlow cf;
cf.copy(d_px, h_px.data(), N).name("h2d_px");
cf.copy(d_py, h_py.data(), N).name("h2d_py");
cf.copy(d_mx, h_mx.data(), K).name("h2d_mx");
cf.copy(d_my, h_my.data(), K).name("h2d_my");
run_and_wait(cf);
}).name("h2d");
auto kmeans = taskflow.emplace([&](){
tf::cudaFlow cf;
auto zero_c = cf.zero(d_c, K).name("zero_c");
auto zero_sx = cf.zero(d_sx, K).name("zero_sx");
auto zero_sy = cf.zero(d_sy, K).name("zero_sy");
auto cluster = cf.kernel(
(N+1024-1) / 1024, 1024, 0,
assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c
).name("cluster");
auto new_centroid = cf.kernel(
1, K, 0,
compute_new_means, d_mx, d_my, d_sx, d_sy, d_c
).name("new_centroid");
cluster.precede(new_centroid)
.succeed(zero_c, zero_sx, zero_sy);
run_and_wait(cf);
}).name("update_means");
auto gpu_condition = taskflow.emplace([i=0, M] () mutable {
return i++ < M ? 0 : 1;
}).name("converged?");
auto stop = taskflow.emplace([&](){
tf::cudaFlow cf;
cf.copy(h_mx.data(), d_mx, K).name("d2h_mx");
cf.copy(h_my.data(), d_my, K).name("d2h_my");
run_and_wait(cf);
}).name("stop");
auto free = taskflow.emplace([&](){
REQUIRE(hipFree(d_px)==hipSuccess);
REQUIRE(hipFree(d_py)==hipSuccess);
REQUIRE(hipFree(d_mx)==hipSuccess);
REQUIRE(hipFree(d_my)==hipSuccess);
REQUIRE(hipFree(d_sx)==hipSuccess);
REQUIRE(hipFree(d_sy)==hipSuccess);
REQUIRE(hipFree(d_c )==hipSuccess);
}).name("free");
// build up the dependency
h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my);
kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d)
.precede(gpu_condition);
gpu_condition.precede(kmeans, stop);
stop.precede(free);
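// Sketch of the graph wired above (derived from the precede/succeed calls, not part of the original):
//   CPU side: init -> clean_up -> pf -> update_cluster -> condition
//             condition returns 0 to loop back to clean_up for M rounds, then 1 to finish.
//   GPU side: allocate_px/py/mx/my -> h2d; {h2d, allocate_sx/sy/c} -> kmeans -> gpu_condition
//             gpu_condition returns 0 to loop back to kmeans for M rounds, then 1 -> stop -> free.
// The two halves share no dependency and can run concurrently inside the same executor.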
executor.run(taskflow).wait();
//taskflow.dump(std::cout);
for(int k=0; k<K; k++) {
REQUIRE(::fabs(h_mx[k] - mx[k]) < 1.0f);
REQUIRE(::fabs(h_my[k] - my[k]) < 1.0f);
}
}
TEST_CASE("kmeans.10.1C1G") {
kmeans(10, 2, 10, 1, 1);
}
TEST_CASE("kmeans.10.1C2G") {
kmeans(10, 2, 10, 1, 2);
}
TEST_CASE("kmeans.10.1C3G") {
kmeans(10, 2, 10, 1, 3);
}
TEST_CASE("kmeans.10.1C4G") {
kmeans(10, 2, 10, 1, 4);
}
TEST_CASE("kmeans.10.2C1G") {
kmeans(10, 2, 10, 2, 1);
}
TEST_CASE("kmeans.10.2C2G") {
kmeans(10, 2, 10, 2, 2);
}
TEST_CASE("kmeans.10.2C3G") {
kmeans(10, 2, 10, 2, 3);
}
TEST_CASE("kmeans.10.2C4G") {
kmeans(10, 2, 10, 2, 4);
}
TEST_CASE("kmeans.10.4C1G") {
kmeans(10, 2, 10, 4, 1);
}
TEST_CASE("kmeans.10.4C2G") {
kmeans(10, 2, 10, 4, 2);
}
TEST_CASE("kmeans.10.4C3G") {
kmeans(10, 2, 10, 4, 3);
}
TEST_CASE("kmeans.10.4C4G") {
kmeans(10, 2, 10, 4, 4);
}
TEST_CASE("kmeans.100.1C1G") {
kmeans(100, 4, 100, 1, 1);
}
TEST_CASE("kmeans.100.2C2G") {
kmeans(100, 4, 100, 2, 2);
}
TEST_CASE("kmeans.100.3C3G") {
kmeans(100, 4, 100, 3, 3);
}
TEST_CASE("kmeans.100.4C4G") {
kmeans(100, 4, 100, 4, 4);
}
TEST_CASE("kmeans.1000.1C1G") {
kmeans(1000, 8, 1000, 1, 1);
}
TEST_CASE("kmeans.1000.2C2G") {
kmeans(1000, 8, 1000, 2, 2);
}
TEST_CASE("kmeans.1000.4C4G") {
kmeans(1000, 8, 1000, 4, 4);
}
TEST_CASE("kmeans.1000.8C8G") {
kmeans(1000, 8, 1000, 8, 8);
}
TEST_CASE("kmeans.1000.16C16G") {
kmeans(1000, 8, 1000, 16, 16);
}
| dd42de1df8fc5285c0244cb2616eeeb42f3c12ec.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/for_each.hpp>
#define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
template <typename T>
void run_and_wait(T& cf) {
tf::cudaStream stream;
cf.run(stream);
stream.synchronize();
}
// Each point (thread) computes its distance to each centroid
// and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(
const float* px,
const float* py,
int N,
const float* mx,
const float* my,
float* sx,
float* sy,
int k,
int* c
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N) {
return;
}
// Make global loads once.
const float x = px[index];
const float y = py[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance = L2(x, y, mx[cluster], my[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(&sx[best_cluster], x);
atomicAdd(&sy[best_cluster], y);
atomicAdd(&c [best_cluster], 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(
float* mx, float* my, const float* sx, const float* sy, const int* c
) {
const int cluster = threadIdx.x;
const int count = max(1, c[cluster]); // turn 0/0 to 0/1
mx[cluster] = sx[cluster] / count;
my[cluster] = sy[cluster] / count;
}
// k-means clustering
void kmeans(int N, int K, int M, size_t num_cpus, size_t num_gpus) {
std::vector<float> h_px, h_py, h_mx, h_my, mx, my;
std::vector<int> c(K), best_ks(N);
std::vector<float> sx(K), sy(K);
float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy, *d_c;
// Randomly generate N points
for(int i=0; i<N; ++i) {
h_px.push_back(rand()%1000 - 500);
h_py.push_back(rand()%1000 - 500);
if(i < K) {
mx.push_back(h_px.back());
my.push_back(h_py.back());
h_mx.push_back(h_px.back());
h_my.push_back(h_py.back());
}
}
tf::Taskflow taskflow;
tf::Executor executor(num_cpus + num_gpus);
// cpu version
auto init = taskflow.emplace([&](){
for(int i=0; i<K; ++i) {
mx[i] = h_px[i];
my[i] = h_py[i];
}
}).name("init");
// clear the storage
auto clean_up = taskflow.emplace([&](){
for(int k=0; k<K; ++k) {
sx[k] = 0.0f;
sy[k] = 0.0f;
c [k] = 0;
}
}).name("clean_up");
tf::Task pf;
// update cluster
pf = taskflow.for_each_index(0, N, 1, [&](int i){
float x = h_px[i];
float y = h_py[i];
float best_d = std::numeric_limits<float>::max();
int best_k = 0;
for (int k = 0; k < K; ++k) {
const float d = L2(x, y, mx[k], my[k]);
if (d < best_d) {
best_d = d;
best_k = k;
}
}
best_ks[i] = best_k;
});
auto update_cluster = taskflow.emplace([&](){
for(int i=0; i<N; i++) {
sx[best_ks[i]] += h_px[i];
sy[best_ks[i]] += h_py[i];
c [best_ks[i]] += 1;
}
for(int k=0; k<K; ++k) {
auto count = max(1, c[k]); // turn 0/0 to 0/1
mx[k] = sx[k] / count;
my[k] = sy[k] / count;
}
}).name("update_cluster");
auto condition = taskflow.emplace([m=0, M]() mutable {
return (m++ < M) ? 0 : 1;
}).name("converged?");
init.precede(clean_up);
clean_up.precede(pf);
pf.precede(update_cluster);
condition.precede(clean_up)
.succeed(update_cluster);
// gpu version
auto allocate_px = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_px, N*sizeof(float)) == cudaSuccess);
}).name("allocate_px");
auto allocate_py = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_py, N*sizeof(float)) == cudaSuccess);
}).name("allocate_py");
auto allocate_mx = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_mx, K*sizeof(float)) == cudaSuccess);
}).name("allocate_mx");
auto allocate_my = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_my, K*sizeof(float)) == cudaSuccess);
}).name("allocate_my");
auto allocate_sx = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_sx, K*sizeof(float)) == cudaSuccess);
}).name("allocate_sx");
auto allocate_sy = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_sy, K*sizeof(float)) == cudaSuccess);
}).name("allocate_sy");
auto allocate_c = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_c, K*sizeof(float)) == cudaSuccess);
}).name("allocate_c");
auto h2d = taskflow.emplace([&](){
tf::cudaFlow cf;
cf.copy(d_px, h_px.data(), N).name("h2d_px");
cf.copy(d_py, h_py.data(), N).name("h2d_py");
cf.copy(d_mx, h_mx.data(), K).name("h2d_mx");
cf.copy(d_my, h_my.data(), K).name("h2d_my");
run_and_wait(cf);
}).name("h2d");
auto kmeans = taskflow.emplace([&](){
tf::cudaFlow cf;
auto zero_c = cf.zero(d_c, K).name("zero_c");
auto zero_sx = cf.zero(d_sx, K).name("zero_sx");
auto zero_sy = cf.zero(d_sy, K).name("zero_sy");
auto cluster = cf.kernel(
(N+1024-1) / 1024, 1024, 0,
assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c
).name("cluster");
auto new_centroid = cf.kernel(
1, K, 0,
compute_new_means, d_mx, d_my, d_sx, d_sy, d_c
).name("new_centroid");
cluster.precede(new_centroid)
.succeed(zero_c, zero_sx, zero_sy);
run_and_wait(cf);
}).name("update_means");
auto gpu_condition = taskflow.emplace([i=0, M] () mutable {
return i++ < M ? 0 : 1;
}).name("converged?");
auto stop = taskflow.emplace([&](){
tf::cudaFlow cf;
cf.copy(h_mx.data(), d_mx, K).name("d2h_mx");
cf.copy(h_my.data(), d_my, K).name("d2h_my");
run_and_wait(cf);
}).name("stop");
auto free = taskflow.emplace([&](){
REQUIRE(cudaFree(d_px)==cudaSuccess);
REQUIRE(cudaFree(d_py)==cudaSuccess);
REQUIRE(cudaFree(d_mx)==cudaSuccess);
REQUIRE(cudaFree(d_my)==cudaSuccess);
REQUIRE(cudaFree(d_sx)==cudaSuccess);
REQUIRE(cudaFree(d_sy)==cudaSuccess);
REQUIRE(cudaFree(d_c )==cudaSuccess);
}).name("free");
// build up the dependency
h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my);
kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d)
.precede(gpu_condition);
gpu_condition.precede(kmeans, stop);
stop.precede(free);
executor.run(taskflow).wait();
//taskflow.dump(std::cout);
for(int k=0; k<K; k++) {
REQUIRE(std::fabs(h_mx[k] - mx[k]) < 1.0f);
REQUIRE(std::fabs(h_my[k] - my[k]) < 1.0f);
}
}
TEST_CASE("kmeans.10.1C1G") {
kmeans(10, 2, 10, 1, 1);
}
TEST_CASE("kmeans.10.1C2G") {
kmeans(10, 2, 10, 1, 2);
}
TEST_CASE("kmeans.10.1C3G") {
kmeans(10, 2, 10, 1, 3);
}
TEST_CASE("kmeans.10.1C4G") {
kmeans(10, 2, 10, 1, 4);
}
TEST_CASE("kmeans.10.2C1G") {
kmeans(10, 2, 10, 2, 1);
}
TEST_CASE("kmeans.10.2C2G") {
kmeans(10, 2, 10, 2, 2);
}
TEST_CASE("kmeans.10.2C3G") {
kmeans(10, 2, 10, 2, 3);
}
TEST_CASE("kmeans.10.2C4G") {
kmeans(10, 2, 10, 2, 4);
}
TEST_CASE("kmeans.10.4C1G") {
kmeans(10, 2, 10, 4, 1);
}
TEST_CASE("kmeans.10.4C2G") {
kmeans(10, 2, 10, 4, 2);
}
TEST_CASE("kmeans.10.4C3G") {
kmeans(10, 2, 10, 4, 3);
}
TEST_CASE("kmeans.10.4C4G") {
kmeans(10, 2, 10, 4, 4);
}
TEST_CASE("kmeans.100.1C1G") {
kmeans(100, 4, 100, 1, 1);
}
TEST_CASE("kmeans.100.2C2G") {
kmeans(100, 4, 100, 2, 2);
}
TEST_CASE("kmeans.100.3C3G") {
kmeans(100, 4, 100, 3, 3);
}
TEST_CASE("kmeans.100.4C4G") {
kmeans(100, 4, 100, 4, 4);
}
TEST_CASE("kmeans.1000.1C1G") {
kmeans(1000, 8, 1000, 1, 1);
}
TEST_CASE("kmeans.1000.2C2G") {
kmeans(1000, 8, 1000, 2, 2);
}
TEST_CASE("kmeans.1000.4C4G") {
kmeans(1000, 8, 1000, 4, 4);
}
TEST_CASE("kmeans.1000.8C8G") {
kmeans(1000, 8, 1000, 8, 8);
}
TEST_CASE("kmeans.1000.16C16G") {
kmeans(1000, 8, 1000, 16, 16);
}
|
1142792848fef300c2ee57f84244ac83e342a868.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void test(float& a, float& b) {
do
{
}while(1);
}
| 1142792848fef300c2ee57f84244ac83e342a868.cu | __global__ void test(float& a, float& b) {
do
{
}while(1);
}
|
bd5aff4c21d3a2fe6a75593512cc0f58d972cff9.hip | // !!! This is a file automatically generated by hipify!!!
/*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifndef __HIPCC__
#include "chain.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "gpu_module.h"
static const char* errorMessage = "CUDA not available";
struct ChainDatabaseGpu {
};
extern void hwEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
ERROR("%s", errorMessage);
}
extern void nwFindScoreGpu(int* queryStart, int* targetStart, Chain* query,
int queryFrontGap, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
ERROR("%s", errorMessage);
}
extern void nwLinearDataGpu(int** scores, int** affines, Chain* query,
int queryFrontGap, Chain* target, int targetFrontGap, Scorer* scorer,
int pLeft, int pRight, int card, Thread* thread) {
ERROR("%s", errorMessage);
}
extern void ovEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
ERROR("%s", errorMessage);
}
extern void ovFindScoreGpu(int* queryStart, int* targetStart, Chain* query,
Chain* target, Scorer* scorer, int score, int card, Thread* thread) {
ERROR("%s", errorMessage);
}
extern void swEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
int** scores, int** affines, Chain* query, Chain* target, Scorer* scorer,
int score, int card, Thread* thread) {
ERROR("%s", errorMessage);
}
extern ChainDatabaseGpu* chainDatabaseGpuCreate(Chain** database, int databaseLen,
int* cards, int cardsLen) {
return NULL;
}
extern void chainDatabaseGpuDelete(ChainDatabaseGpu* chainDatabaseGpu) {
}
extern size_t chainDatabaseGpuMemoryConsumption(Chain** database, int databaseLen) {
return 0;
}
extern void scoreDatabaseGpu(int** scores, int type, Chain* query,
ChainDatabaseGpu* chainDatabaseGpu, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread) {
ERROR("%s", errorMessage);
}
extern void scoreDatabasesGpu(int** scores, int type, Chain** queries,
int queriesLen, ChainDatabaseGpu* chainDatabaseGpu, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen, Thread* thread) {
ERROR("%s", errorMessage);
}
#endif // __HIPCC__
| bd5aff4c21d3a2fe6a75593512cc0f58d972cff9.cu | /*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifndef __CUDACC__
#include "chain.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "gpu_module.h"
static const char* errorMessage = "CUDA not available";
struct ChainDatabaseGpu {
};
extern void hwEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
ERROR("%s", errorMessage);
}
extern void nwFindScoreGpu(int* queryStart, int* targetStart, Chain* query,
int queryFrontGap, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
ERROR("%s", errorMessage);
}
extern void nwLinearDataGpu(int** scores, int** affines, Chain* query,
int queryFrontGap, Chain* target, int targetFrontGap, Scorer* scorer,
int pLeft, int pRight, int card, Thread* thread) {
ERROR("%s", errorMessage);
}
extern void ovEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
ERROR("%s", errorMessage);
}
extern void ovFindScoreGpu(int* queryStart, int* targetStart, Chain* query,
Chain* target, Scorer* scorer, int score, int card, Thread* thread) {
ERROR("%s", errorMessage);
}
extern void swEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
int** scores, int** affines, Chain* query, Chain* target, Scorer* scorer,
int score, int card, Thread* thread) {
ERROR("%s", errorMessage);
}
extern ChainDatabaseGpu* chainDatabaseGpuCreate(Chain** database, int databaseLen,
int* cards, int cardsLen) {
return NULL;
}
extern void chainDatabaseGpuDelete(ChainDatabaseGpu* chainDatabaseGpu) {
}
extern size_t chainDatabaseGpuMemoryConsumption(Chain** database, int databaseLen) {
return 0;
}
extern void scoreDatabaseGpu(int** scores, int type, Chain* query,
ChainDatabaseGpu* chainDatabaseGpu, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread) {
ERROR("%s", errorMessage);
}
extern void scoreDatabasesGpu(int** scores, int type, Chain** queries,
int queriesLen, ChainDatabaseGpu* chainDatabaseGpu, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen, Thread* thread) {
ERROR("%s", errorMessage);
}
#endif // __CUDACC__
|
4b5cc597e6be21a84a4dc8e119322d2c0608f391.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "loops/scalar.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <system/op_boilerplate.h>
#include <helpers/TAD.h>
#include <types/types.h>
namespace functions {
namespace scalar {
}
} | 4b5cc597e6be21a84a4dc8e119322d2c0608f391.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "loops/scalar.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <system/op_boilerplate.h>
#include <helpers/TAD.h>
#include <types/types.h>
namespace functions {
namespace scalar {
}
} |
d8032930f66590712af03b8b9cebdafae0ed10fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <vector>
#include "caffe/layers/rbf_layer.hpp"
namespace caffe {
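// RBFForward: per-element radial basis activation y = exp(-(x - mu)^2 / (2 * var)),
// shifted by RBF_EPSILON and clamped to <= 1 so that log(y) stays finite in the backward pass.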
template <typename Dtype>
__global__ void RBFForward(const int n_kernels, const int n_channels,
const int height, const int width,
const Dtype* bottom_data, const Dtype* mu, const Dtype* var, Dtype* top_data)
{
#define RBF_EPSILON 0.0000001
// run once per batch index
const int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_index < n_kernels)
{
const int c = thread_index / (height * width);
const int h = (thread_index / width) % height;
const int w = thread_index % width;
const Dtype mu_= mu[c];
const Dtype var_= var[c];
const int spatial_offset = ( c * height + h ) * width + w;
const Dtype bottom_data_ = bottom_data[spatial_offset];
const Dtype x_minus_mu = bottom_data_ - mu_;
// y = exp(-inf) = 0 --> in the backward pass log(y) = -inf
// to prevent that, shift y by epsilon and take min(y, 1), which ensures the rbf output is > 0 and <= 1
top_data[spatial_offset] = min(exp(-((x_minus_mu * x_minus_mu) / (2 * var_))) + RBF_EPSILON, 1.0);
}
}
template <typename Dtype>
void RBFLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const Dtype* mu = this->blobs_[0]->gpu_data();
const Dtype* var = this->blobs_[1]->gpu_data();
const int num = bottom[0]->shape(0);
const int n_channels = bottom[0]->shape(1);
const int height = bottom[0]->shape(2);
const int width = bottom[0]->shape(3);
const int n_kernels = n_channels * height * width;
for(int i=0; i< n_channels; i++)
{
const Dtype* bottom_data_ = bottom[0]->gpu_data() + bottom[0]->offset(i,0,0,0);
Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(i,0,0,0);
hipLaunchKernelGGL(( RBFForward), dim3(CAFFE_GET_BLOCKS(n_kernels)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
n_kernels,
n_channels,
height,
width,
bottom_data_,
mu,
var,
top_data);
CUDA_POST_KERNEL_CHECK;
}
}
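// RBFBackward: chain rule through the RBF using only the saved output y:
// dL/dx = dL/dy * ( -y * sqrt(-2 * log(y) / var) ), since sqrt(-2 * var * log(y)) = |x - mu|.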
template <typename Dtype>
__global__ void RBFBackward(const int n_kernels, const int n_channels,
const int height, const int width,
const Dtype* top_data, const Dtype* top_diff,
const Dtype* var, Dtype* bottom_diff )
{
// run once per batch index
const int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_index < n_kernels)
{
const int c = thread_index / (height * width);
const int h = (thread_index / width) % height;
const int w = thread_index % width;
const Dtype var_= var[c];
const int spatial_offset = ( c * height + h ) * width + w;
const Dtype top_data_= top_data[spatial_offset];
const Dtype top_diff_= top_diff[spatial_offset];
// write the input gradient directly to global memory
bottom_diff[spatial_offset] = top_diff_ * ( - top_data_ * sqrt(-((2 * log(top_data_)) / var_)) );
}
}
template <typename Dtype>
void RBFLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
const int num = bottom[0]->shape(0);
const int n_channels = bottom[0]->shape(1);
const int height = bottom[0]->shape(2);
const int width = bottom[0]->shape(3);
const int n_kernels = n_channels * height * width;
// Backward diff
if(propagate_down[0])
{
const Dtype* var = this->blobs_[1]->gpu_data();
for(int i=0; i< n_channels; i++)
{
Dtype* bottom_diff_ = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(i,0,0,0);
const Dtype* top_data = top[0]->gpu_data() + top[0]->offset(i,0,0,0);
const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(i,0,0,0);
hipLaunchKernelGGL(( RBFBackward), dim3(CAFFE_GET_BLOCKS(n_kernels)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
n_kernels,
n_channels,
height,
width,
top_data,
top_diff,
var,
bottom_diff_);
CUDA_POST_KERNEL_CHECK;
}
}
// Parameter gradient calculation
if(this->param_propagate_down(0))
{
// using the memory of bottom diff multiplied by -1 gives the gradient of mu
// because of the (x - mu) operation.
Dtype* mu_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_diff = bottom[0]->gpu_diff();
Dtype* spatial_diff = spatial_diff_.mutable_gpu_diff();
// grad mu accumulation over spatial dims
caffe_gpu_gemv(CblasNoTrans, num * n_channels, height * width, Dtype(1), bottom_diff, spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_diff);
// the -1 is to get gradient w.r.t. mu
// grad mu accumulation over batch
caffe_gpu_gemv(CblasTrans, num, n_channels, Dtype(-1), spatial_diff, batch_sum_multiplier_.gpu_data(), Dtype(0), mu_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RBFLayer);
} // namespace caffe
| d8032930f66590712af03b8b9cebdafae0ed10fc.cu | #include <cmath>
#include <vector>
#include "caffe/layers/rbf_layer.hpp"
namespace caffe {
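// RBFForward: per-element radial basis activation y = exp(-(x - mu)^2 / (2 * var)),
// shifted by RBF_EPSILON and clamped to <= 1 so that log(y) stays finite in the backward pass.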
template <typename Dtype>
__global__ void RBFForward(const int n_kernels, const int n_channels,
const int height, const int width,
const Dtype* bottom_data, const Dtype* mu, const Dtype* var, Dtype* top_data)
{
#define RBF_EPSILON 0.0000001
// run once per batch index
const int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_index < n_kernels)
{
const int c = thread_index / (height * width);
const int h = (thread_index / width) % height;
const int w = thread_index % width;
const Dtype mu_= mu[c];
const Dtype var_= var[c];
const int spatial_offset = ( c * height + h ) * width + w;
const Dtype bottom_data_ = bottom_data[spatial_offset];
const Dtype x_minus_mu = bottom_data_ - mu_;
// y = exp(-inf) = 0 --> in the backward pass log(y) = -inf
// to prevent that, shift y by epsilon and take min(y, 1), which ensures the rbf output is > 0 and <= 1
top_data[spatial_offset] = min(exp(-((x_minus_mu * x_minus_mu) / (2 * var_))) + RBF_EPSILON, 1.0);
}
}
template <typename Dtype>
void RBFLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const Dtype* mu = this->blobs_[0]->gpu_data();
const Dtype* var = this->blobs_[1]->gpu_data();
const int num = bottom[0]->shape(0);
const int n_channels = bottom[0]->shape(1);
const int height = bottom[0]->shape(2);
const int width = bottom[0]->shape(3);
const int n_kernels = n_channels * height * width;
for(int i=0; i< n_channels; i++)
{
const Dtype* bottom_data_ = bottom[0]->gpu_data() + bottom[0]->offset(i,0,0,0);
Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(i,0,0,0);
RBFForward<<< CAFFE_GET_BLOCKS(n_kernels), CAFFE_CUDA_NUM_THREADS >>>(
n_kernels,
n_channels,
height,
width,
bottom_data_,
mu,
var,
top_data);
CUDA_POST_KERNEL_CHECK;
}
}
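// RBFBackward: chain rule through the RBF using only the saved output y:
// dL/dx = dL/dy * ( -y * sqrt(-2 * log(y) / var) ), since sqrt(-2 * var * log(y)) = |x - mu|.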
template <typename Dtype>
__global__ void RBFBackward(const int n_kernels, const int n_channels,
const int height, const int width,
const Dtype* top_data, const Dtype* top_diff,
const Dtype* var, Dtype* bottom_diff )
{
// run once per batch index
const int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_index < n_kernels)
{
const int c = thread_index / (height * width);
const int h = (thread_index / width) % height;
const int w = thread_index % width;
const Dtype var_= var[c];
const int spatial_offset = ( c * height + h ) * width + w;
const Dtype top_data_= top_data[spatial_offset];
const Dtype top_diff_= top_diff[spatial_offset];
// write the input gradient directly to global memory
bottom_diff[spatial_offset] = top_diff_ * ( - top_data_ * sqrt(-((2 * log(top_data_)) / var_)) );
}
}
template <typename Dtype>
void RBFLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
const int num = bottom[0]->shape(0);
const int n_channels = bottom[0]->shape(1);
const int height = bottom[0]->shape(2);
const int width = bottom[0]->shape(3);
const int n_kernels = n_channels * height * width;
// Backward diff
if(propagate_down[0])
{
const Dtype* var = this->blobs_[1]->gpu_data();
for(int i=0; i< n_channels; i++)
{
Dtype* bottom_diff_ = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(i,0,0,0);
const Dtype* top_data = top[0]->gpu_data() + top[0]->offset(i,0,0,0);
const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(i,0,0,0);
RBFBackward<<< CAFFE_GET_BLOCKS(n_kernels), CAFFE_CUDA_NUM_THREADS >>>(
n_kernels,
n_channels,
height,
width,
top_data,
top_diff,
var,
bottom_diff_);
CUDA_POST_KERNEL_CHECK;
}
}
// Parameter gradient calculation
if(this->param_propagate_down(0))
{
// using the memory of bottom diff multiplied by -1 gives the gradient of mu
// because of the (x - mu) operation.
Dtype* mu_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_diff = bottom[0]->gpu_diff();
Dtype* spatial_diff = spatial_diff_.mutable_gpu_diff();
// grad mu accumulation over spatial dims
caffe_gpu_gemv(CblasNoTrans, num * n_channels, height * width, Dtype(1), bottom_diff, spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_diff);
// the -1 is to get gradient w.r.t. mu
// grad mu accumulation over batch
caffe_gpu_gemv(CblasTrans, num, n_channels, Dtype(-1), spatial_diff, batch_sum_multiplier_.gpu_data(), Dtype(0), mu_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(RBFLayer);
} // namespace caffe
|
3d299a316346cb539700d99c67be89e3b5428311.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cstring>
#include "common.h"
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void sum_array_gpu(int* a, int* b, int* c, int size)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
if (gid < size)
{
c[gid] = a[gid] + b[gid];
}
}
void sum_array_cpu(int* a, int* b, int* c, int size)
{
for (int i = 0; i < size; i++)
{
c[i] = a[i] + b[i];
}
}
int main()
{
int size = 10000;
int block_size = 128;
int NO_BYTES = size * sizeof(int);
//host pointers
int* h_a, * h_b, * gpu_results, *h_c;
//allocate memory for host pointers
h_a = (int*)malloc(NO_BYTES);
h_b = (int*)malloc(NO_BYTES);
gpu_results = (int*)malloc(NO_BYTES);
h_c = (int*)malloc(NO_BYTES);
//initialize host pointer
time_t t;
srand((unsigned)time(&t));
for (int i = 0; i < size; i++)
{
h_a[i] = (int)(rand() & 0xFF);
}
for (int i = 0; i < size; i++)
{
h_b[i] = (int)(rand() & 0xFF);
}
sum_array_cpu(h_a, h_b, h_c, size);
memset(gpu_results, 0, NO_BYTES);
//device pointer
int* d_a, * d_b, * d_c;
hipMalloc((int**)& d_a, NO_BYTES);
hipMalloc((int**)& d_b, NO_BYTES);
hipMalloc((int**)& d_c, NO_BYTES);
hipMemcpy(d_a, h_a, NO_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, NO_BYTES, hipMemcpyHostToDevice);
//launching the grid
dim3 block(block_size);
dim3 grid((size / block.x)+1);
sum_array_gpu << <grid, block >> > (d_a, d_b, d_c, size);
hipDeviceSynchronize();
//memory transfer back to host
hipMemcpy(gpu_results, d_c, NO_BYTES, hipMemcpyDeviceToHost);
//array comparison
compare_arrays(gpu_results, h_c,size);
hipFree(d_c);
hipFree(d_b);
hipFree(d_a);
free(gpu_results);
free(h_a);
free(h_b);
free(h_c);
hipDeviceReset();
return 0;
} | 3d299a316346cb539700d99c67be89e3b5428311.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cstring>
#include "common.h"
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void sum_array_gpu(int* a, int* b, int* c, int size)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
if (gid < size)
{
c[gid] = a[gid] + b[gid];
}
}
void sum_array_cpu(int* a, int* b, int* c, int size)
{
for (int i = 0; i < size; i++)
{
c[i] = a[i] + b[i];
}
}
int main()
{
int size = 10000;
int block_size = 128;
int NO_BYTES = size * sizeof(int);
//host pointers
int* h_a, * h_b, * gpu_results, *h_c;
//allocate memory for host pointers
h_a = (int*)malloc(NO_BYTES);
h_b = (int*)malloc(NO_BYTES);
gpu_results = (int*)malloc(NO_BYTES);
h_c = (int*)malloc(NO_BYTES);
//initialize host pointer
time_t t;
srand((unsigned)time(&t));
for (int i = 0; i < size; i++)
{
h_a[i] = (int)(rand() & 0xFF);
}
for (int i = 0; i < size; i++)
{
h_b[i] = (int)(rand() & 0xFF);
}
sum_array_cpu(h_a, h_b, h_c, size);
memset(gpu_results, 0, NO_BYTES);
//device pointer
int* d_a, * d_b, * d_c;
cudaMalloc((int**)& d_a, NO_BYTES);
cudaMalloc((int**)& d_b, NO_BYTES);
cudaMalloc((int**)& d_c, NO_BYTES);
cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice);
//launching the grid
dim3 block(block_size);
dim3 grid((size / block.x)+1);
sum_array_gpu << <grid, block >> > (d_a, d_b, d_c, size);
cudaDeviceSynchronize();
//memory transfer back to host
cudaMemcpy(gpu_results, d_c, NO_BYTES, cudaMemcpyDeviceToHost);
//array comparison
compare_arrays(gpu_results, h_c,size);
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
free(gpu_results);
free(h_a);
free(h_b);
free(h_c);
cudaDeviceReset();
return 0;
} |
d70d77d97d35c9e720f91fa5887df4e5174c211a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Sergio Mercado A01020382
* Cuda : normal to soviet image
*/
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <opencv/highgui.h>
//#include "utils/cheader.h"
typedef enum color {BLUE, GREEN, RED} Color;
__global__ void red(unsigned char *src, unsigned char *dest, int width, int height, int step, int channels){
int ren, col;
float r, g, b;
ren = blockIdx.x;
col = threadIdx.x;
r = 0; g = 0; b = 0;
r = (float) src[(ren * step) + (col * channels) + RED];
g = (float) src[(ren * step) + (col * channels) + GREEN];
b = (float) src[(ren * step) + (col * channels) + BLUE];
//Set only the desired rgb value
dest[(ren * step) + (col * channels) + RED] = (unsigned char) (0xFF);
dest[(ren * step) + (col * channels) + GREEN] = (unsigned char) (g);
dest[(ren * step) + (col * channels) + BLUE] = (unsigned char) (b);
}
int main(int argc, char* argv[]) {
int step, size;
unsigned char *dev_src, *dev_dest;
//obtain image from source
IplImage *src = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
IplImage *dest = cvCreateImage(cvSize(src->width, src->height), IPL_DEPTH_8U, 3);
size = src->width * src->height * src->nChannels * sizeof(uchar);
step = src->widthStep / sizeof(uchar);
//Memory allocation on gpu
hipMalloc((void**) &dev_src, size);
hipMalloc((void**) &dev_dest, size);
hipMemcpy(dev_src, src->imageData, size, hipMemcpyHostToDevice);
//Write to file
hipLaunchKernelGGL(( red), dim3(src->height), dim3(src->width), 0, 0, dev_src, dev_dest, src->width, src->height, step, src->nChannels);
hipMemcpy(dest->imageData, dev_dest, size, hipMemcpyDeviceToHost);
cvSaveImage("result.png", dest);
hipFree(dev_dest);
hipFree(dev_src);
return 0;
}
| d70d77d97d35c9e720f91fa5887df4e5174c211a.cu | /*
* Sergio Mercado A01020382
* Cuda : normal to soviet image
*/
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <opencv/highgui.h>
//#include "utils/cheader.h"
typedef enum color {BLUE, GREEN, RED} Color;
__global__ void red(unsigned char *src, unsigned char *dest, int width, int height, int step, int channels){
int ren, col;
float r, g, b;
ren = blockIdx.x;
col = threadIdx.x;
r = 0; g = 0; b = 0;
r = (float) src[(ren * step) + (col * channels) + RED];
g = (float) src[(ren * step) + (col * channels) + GREEN];
b = (float) src[(ren * step) + (col * channels) + BLUE];
//Set only the desired rgb value
dest[(ren * step) + (col * channels) + RED] = (unsigned char) (0xFF);
dest[(ren * step) + (col * channels) + GREEN] = (unsigned char) (g);
dest[(ren * step) + (col * channels) + BLUE] = (unsigned char) (b);
}
int main(int argc, char* argv[]) {
int step, size;
unsigned char *dev_src, *dev_dest;
//obtain image from source
IplImage *src = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
IplImage *dest = cvCreateImage(cvSize(src->width, src->height), IPL_DEPTH_8U, 3);
size = src->width * src->height * src->nChannels * sizeof(uchar);
step = src->widthStep / sizeof(uchar);
//Memory allocation on gpu
cudaMalloc((void**) &dev_src, size);
cudaMalloc((void**) &dev_dest, size);
cudaMemcpy(dev_src, src->imageData, size, cudaMemcpyHostToDevice);
//Write to file
red<<<src->height, src->width>>>(dev_src, dev_dest, src->width, src->height, step, src->nChannels);
cudaMemcpy(dest->imageData, dev_dest, size, cudaMemcpyDeviceToHost);
cvSaveImage("result.png", dest);
cudaFree(dev_dest);
cudaFree(dev_src);
return 0;
}
|
nearest_interp_compute.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/nearest_interp_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
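// Grid-stride kernel: each thread maps one output element (batch, channel, y, x) back to its
// nearest source pixel through ratio_h / ratio_w; align_corners adds the +0.5 rounding.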
__global__ void KeNearestNeighborInterp(const float* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
float* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int out_img_idx = tid % out_img_w;
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
}
}
void NearestInterpCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
Tensor* input = param.X;
Tensor* output = param.Out;
Tensor* out_size = param.OutSize;
auto* input_data = input->data<float>();
const int n = input->dims()[0];
const int c = input->dims()[1];
const int in_h = input->dims()[2];
const int in_w = input->dims()[3];
int out_h = param.out_h;
int out_w = param.out_w;
float scale = param.scale;
bool align_corners = param.align_corners;
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
if (out_size != nullptr) {
Tensor sizes;
float* size_data = sizes.mutable_data<float>();
float* outsize_data = out_size->mutable_data<float>(TARGET(kCUDA));
hipMemcpy(
size_data, outsize_data, sizeof(float) * 2, hipMemcpyDeviceToHost);
out_h = static_cast<int>(size_data[0]);
out_w = static_cast<int>(size_data[1]);
}
auto output_data = output->mutable_data<float>(TARGET(kCUDA));
if (in_h == out_h && in_w == out_w) {
hipMemcpy(output_data,
input_data,
sizeof(float) * n * c * in_h * in_w,
hipMemcpyHostToDevice);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
int pixelNum = n * out_chw;
int threads = 512;
int blocks = (pixelNum + threads - 1) / threads;
blocks = blocks > 8 ? 8 : blocks;
hipLaunchKernelGGL(( KeNearestNeighborInterp), dim3(blocks), dim3(threads), 0, stream, input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners);
hipError_t error = hipGetLastError();
if (error != hipSuccess) LOG(INFO) << hipGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(nearest_interp,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::NearestInterpCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("OutSize", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
| nearest_interp_compute.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/nearest_interp_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
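// Grid-stride kernel: each thread maps one output element (batch, channel, y, x) back to its
// nearest source pixel through ratio_h / ratio_w; align_corners adds the +0.5 rounding.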
__global__ void KeNearestNeighborInterp(const float* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
float* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int out_img_idx = tid % out_img_w;
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
}
}
void NearestInterpCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
Tensor* input = param.X;
Tensor* output = param.Out;
Tensor* out_size = param.OutSize;
auto* input_data = input->data<float>();
const int n = input->dims()[0];
const int c = input->dims()[1];
const int in_h = input->dims()[2];
const int in_w = input->dims()[3];
int out_h = param.out_h;
int out_w = param.out_w;
float scale = param.scale;
bool align_corners = param.align_corners;
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
if (out_size != nullptr) {
Tensor sizes;
float* size_data = sizes.mutable_data<float>();
float* outsize_data = out_size->mutable_data<float>(TARGET(kCUDA));
cudaMemcpy(
size_data, outsize_data, sizeof(float) * 2, cudaMemcpyDeviceToHost);
out_h = static_cast<int>(size_data[0]);
out_w = static_cast<int>(size_data[1]);
}
auto output_data = output->mutable_data<float>(TARGET(kCUDA));
if (in_h == out_h && in_w == out_w) {
cudaMemcpy(output_data,
input_data,
sizeof(float) * n * c * in_h * in_w,
cudaMemcpyHostToDevice);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
int pixelNum = n * out_chw;
int threads = 512;
int blocks = (pixelNum + threads - 1) / threads;
blocks = blocks > 8 ? 8 : blocks;
KeNearestNeighborInterp<<<blocks, threads, 0, stream>>>(input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(nearest_interp,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::NearestInterpCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("OutSize", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
|
6a6352f1f2401361663e20b794f2aae807acc82a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include "tree.cuh"
__device__ node* tempData[INPUTSIZE];
__device__ node* tempData1[INPUTSIZE];
__device__ node* root1;
__device__ node* globalCurr;
__device__ node* globalCurrs[ORDER];
__device__ node* newNode;
__device__ int globalIdx;
__device__ int tempKeys[ORDER];
__device__ node* tempPointers[ORDER];
__device__ int globalPointerIdx;
__device__ void make_node(node*& new_node)
{
new_node = (node*)malloc(sizeof(node));
new_node->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
new_node->pointers = (node**)malloc( ORDER * sizeof(node *) );
new_node->is_leaf = false;
new_node->num_keys = 0;
new_node->parent = NULL;
new_node->next = NULL;
}
__device__ void make_leaf(node*& new_node)
{
make_node(new_node);
new_node->is_leaf = true;
}
__global__ void buildLeaves(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1);
node* newNode;
if(inWholeIdx < noOfNodes)
{
make_leaf(newNode);
tempData[inWholeIdx] = newNode;
assert(tempData[inWholeIdx]);
}
}
__global__ void buildRoot(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(inWholeIdx == 0)
{
root1 = (node*)malloc(sizeof(node));
root1->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
root1->pointers = (node**)malloc( ORDER * sizeof(node *) );
root1->is_leaf = false;
root1->num_keys = 0;
root1->parent = NULL;
root1->next = NULL;
root1->keys[0] = 5;
}
}
__global__ void buildLevel(node*& root, int* input, int* result, int size, int x)
{
node** arr;
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = size / (ORDER / 2);
if(x)
arr = tempData1;
else
arr = tempData;
if(inWholeIdx < noOfNodes)
{
node* newNode;
make_node(newNode);
newNode->keys[0] = inWholeIdx;
arr[inWholeIdx] = newNode;
}
}
__global__ void fillLevel(node*& root, int* input, int* result, int size, int x)
{
node** parent;
node** children;
if(x)
{
parent = tempData1;
children = tempData;
}
else
{
parent = tempData;
children = tempData1;
}
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = size / (ORDER / 2);
unsigned inNodeIdx = inWholeIdx % (ORDER / 2);
unsigned nodeNo = inWholeIdx / (ORDER / 2 );
if(nodeNo == noOfNodes)
{
nodeNo--;
inNodeIdx = ((ORDER/2)) + inNodeIdx;
}
if(inWholeIdx < size)
{
assert(parent[nodeNo]);
assert(parent[nodeNo]->keys);
assert(children[inWholeIdx]);
parent[nodeNo]->pointers[inNodeIdx] = children[inWholeIdx];
children[inWholeIdx]->parent = parent[nodeNo];
if(inNodeIdx < (ORDER/2) - 1 || (nodeNo == noOfNodes -1 && inWholeIdx != size - 1))
{
assert(children[inWholeIdx]);
assert(children[inWholeIdx]->num_keys);
assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]);
parent[nodeNo]->keys[inNodeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1];
assert(parent[nodeNo]->keys[inNodeIdx]);
}
}
if(inNodeIdx == 0)
{
if(nodeNo < noOfNodes -1)
{
parent[nodeNo]->num_keys = (ORDER / 2) - 1;
}
else if(nodeNo == noOfNodes - 1)
parent[nodeNo]->num_keys = (size % (ORDER / 2)) + (ORDER / 2) - 1;
}
}
__global__ void fillRoot(node*& root, int* input, int* result, int size, int x)
{
node** children;
if(x)
{
children = tempData;
}
else
{
children = tempData1;
}
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned inNodeIdx = inWholeIdx % size;
if(inWholeIdx < size)
{
assert(children[inWholeIdx]);
root1->pointers[inNodeIdx] = children[inWholeIdx];
children[inWholeIdx]->parent = root1;
if(inNodeIdx < size -1 )
{
assert(children[inWholeIdx]);
assert(children[inWholeIdx]->num_keys);
assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]);
root1->keys[inWholeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1];
assert(root1->keys[inNodeIdx]);
}
}
if(inWholeIdx == 0)
{
root1->num_keys = size - 1;
}
}
__global__ void fillLeaves(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1);
unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1);
unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1);
if(nodeNo == noOfNodes)
{
nodeNo--;
inNodeIdx = ((ORDER/2) - 1) + inNodeIdx;
}
if(inWholeIdx < INPUTSIZE)
{
assert(tempData[nodeNo]);
assert(tempData[nodeNo]->keys);
assert(input[inWholeIdx]);
tempData[nodeNo]->keys[inNodeIdx] = input[inWholeIdx];
}
if(inNodeIdx == 0)
{
if(nodeNo < noOfNodes -1)
{
tempData[nodeNo]->next = tempData[nodeNo + 1];
tempData[nodeNo]->num_keys = ((ORDER / 2) - 1);
}
else if(nodeNo == noOfNodes - 1)
tempData[nodeNo]->num_keys = (INPUTSIZE % ((ORDER / 2) - 1)) + ((ORDER / 2) - 1);
}
}
__global__ void bulkLoad(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1);
unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1);
unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1);
if(inWholeIdx == 0)
{
root->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
root->pointers = (node**)malloc( ORDER * sizeof(node *) );
root->is_leaf = false;
root->num_keys = 0;
root->parent = NULL;
root->next = NULL;
}
node* newNode;
if(inNodeIdx == 0 && nodeNo < noOfNodes)
{
make_leaf(newNode);
tempData[nodeNo] = newNode;
assert(tempData[nodeNo]);
}
if(nodeNo == noOfNodes)
{
nodeNo--;
inNodeIdx = ((ORDER/2) - 1) + inNodeIdx;
}
__syncthreads();
if(inWholeIdx < INPUTSIZE && nodeNo < noOfNodes)
{
tempData[nodeNo]->keys[inNodeIdx] = input[inWholeIdx];
}
}
__device__ void addKey(node* curr, node* child)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
int val = child->keys[0];
if(contains(curr, val))
return;
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx < curr->num_keys)
tempKeys[inWholeIdx] = curr->keys[inWholeIdx];
if(!curr->is_leaf)
tempPointers[inWholeIdx] = curr->pointers[inWholeIdx];
}
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx == 0)
{
if(val <= curr->keys[0])
{
globalIdx = 0;
}
}
else if(inWholeIdx < curr->num_keys && inWholeIdx > 0)
{
if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx])
{
globalIdx = inWholeIdx;
}
}
else if(inWholeIdx == curr->num_keys)
{
if(val > curr->keys[curr->num_keys - 1])
{
globalIdx = curr->num_keys;
}
}
}
__syncthreads();
if(inWholeIdx >= globalIdx && inWholeIdx <= curr->num_keys)
{
if(inWholeIdx < curr->num_keys)
curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx];
if(!curr->is_leaf)
curr->pointers[inWholeIdx+1] = curr->pointers[inWholeIdx];
}
__syncthreads();
if(inWholeIdx == globalIdx)
{
if(inWholeIdx > 0)
curr->keys[globalIdx] = val;
else
curr->keys[globalIdx] = child->keys[child->num_keys]+1;
if(!curr->is_leaf)
curr->pointers[globalIdx];
}
__syncthreads();
if(inWholeIdx == 0)
curr->num_keys++;
}
__device__ void addKey(node* curr, int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(contains(curr, val))
return;
if(inWholeIdx < curr->num_keys)
tempKeys[inWholeIdx] = curr->keys[inWholeIdx];
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx == 0)
{
if(val <= curr->keys[0])
{
globalIdx = 0;
}
}
else if(inWholeIdx < curr->num_keys && inWholeIdx > 0)
{
if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx])
{
globalIdx = inWholeIdx;
}
}
else if(inWholeIdx == curr->num_keys)
{
if(val > curr->keys[curr->num_keys - 1])
{
globalIdx = curr->num_keys;
}
}
}
__syncthreads();
if(inWholeIdx >= globalIdx && inWholeIdx < curr->num_keys)
curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx];
__syncthreads();
if(inWholeIdx == globalIdx)
curr->keys[globalIdx] = val;
__syncthreads();
if(inWholeIdx == 0)
curr->num_keys++;
}
__global__ void insertVal(int val)
{
node* curr = find(val);
__syncthreads();
assert(curr->num_keys < ORDER -1);
if(curr->num_keys < ORDER -1)
addKey(curr, val);
else
split(curr, val);
}
__device__ void split(node* curr, int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
node* newNodeLocal;
if(inWholeIdx == 0)
{
newNode = (node*)malloc(sizeof(node));
newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
newNode->pointers = (node**)malloc( ORDER * sizeof(node *) );
newNode->is_leaf = curr->is_leaf;
newNode->num_keys = ORDER/2;
newNode->parent = curr->parent;
newNode->next = curr->next;
curr->num_keys = ORDER/2;
curr->next = newNode;
globalPointerIdx = 0;
}
__syncthreads();
newNodeLocal = newNode;
__syncthreads();
if(inWholeIdx < (ORDER /2))
{
newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx];
}
if(!curr->is_leaf && inWholeIdx <= (ORDER /2))
{
newNode->leafs[inWholeIdx] = curr->pointers[ORDER/2 + inWholeIdx];
}
if(curr->parent->num_keys >= ORDER)
split(curr, newNode);
else
addKey(curr->parent, newNodeLocal);
}
__device__ void split(node* curr, node* child)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
node* newNodeLocal;
if(inWholeIdx == 0)
{
newNode = (node*)malloc(sizeof(node));
newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
newNode->pointers = (node**)malloc( ORDER * sizeof(node *) );
newNode->is_leaf = curr->is_leaf;
newNode->num_keys = ORDER/2;
newNode->parent = curr->parent;
newNode->next = curr->next;
curr->num_keys = ORDER/2;
curr->next = newNode;
globalPointerIdx = 0;
}
__syncthreads();
newNodeLocal = newNode;
__syncthreads();
if(inWholeIdx < (ORDER /2))
{
newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx];
}
if(!curr->is_leaf && inWholeIdx <= (ORDER /2))
{
newNode->leafs[inWholeIdx] = curr->pointers[ORDER/2 + inWholeIdx];
}
if(curr->parent->num_keys >= ORDER)
split(curr, newNode);
else
addKey(curr->parent, newNodeLocal);
}
__device__ int contains(node* curr, int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(inWholeIdx < curr->num_keys)
{
if(curr->keys[inWholeIdx] == val)
globalIdx = 1;
}
__syncthreads();
return globalIdx;
}
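// Cooperative B+-tree descent: each thread of the block tests one key interval of the current
// node; the thread whose interval contains val publishes the child pointer through globalCurr.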
__device__ node* find(int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
assert(root1);
node* curr = root1;
assert(curr);
assert(!curr->is_leaf);
__syncthreads();
while(!curr->is_leaf)
{
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx == 0)
{
assert(curr->keys[0]);
if(val <= curr->keys[0])
{
assert(curr->pointers[0]);
globalCurr = curr->pointers[0];
}
}
else if(inWholeIdx < curr->num_keys && inWholeIdx > 0)
{
assert(curr->keys[inWholeIdx-1]);
assert(curr->keys[inWholeIdx]);
if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx])
{
assert(curr->pointers[inWholeIdx]);
globalCurr = curr->pointers[inWholeIdx];
}
}
else if(inWholeIdx == curr->num_keys)
{
assert(curr->keys[curr->num_keys - 1]);
if(val > curr->keys[curr->num_keys - 1])
{
assert(curr->pointers[inWholeIdx]);
globalCurr = curr->pointers[inWholeIdx];
}
}
}
__syncthreads();
curr = globalCurr;
__syncthreads();
}
return curr;
}
__device__ node* find(int* values, int len)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned inNodeIdx = threadIdx.x;
unsigned nodeNo = blockIdx.x;
int val;
if(nodeNo < len)
val = values[nodeNo];
assert(root1);
node* curr = root1;
assert(curr);
assert(!curr->is_leaf);
__syncthreads();
while(!curr->is_leaf)
{
if(inNodeIdx <= curr->num_keys && nodeNo < len)
{
if(inNodeIdx == 0)
{
assert(curr->keys[0]);
if(val <= curr->keys[0])
{
assert(curr->pointers[0]);
globalCurrs[nodeNo] = curr->pointers[0];
}
}
else if(inNodeIdx < curr->num_keys && inNodeIdx > 0)
{
assert(curr->keys[inNodeIdx-1]);
assert(curr->keys[inNodeIdx]);
if(curr->keys[inNodeIdx-1] < val && val <= curr->keys[inNodeIdx])
{
assert(curr->pointers[inNodeIdx]);
globalCurrs[nodeNo] = curr->pointers[inNodeIdx];
}
}
else if(inNodeIdx == curr->num_keys)
{
assert(curr->keys[curr->num_keys - 1]);
if(val > curr->keys[curr->num_keys - 1])
{
assert(curr->pointers[inNodeIdx]);
globalCurrs[nodeNo] = curr->pointers[inNodeIdx];
}
}
}
__syncthreads();
assert(globalCurrs[nodeNo]);
curr = globalCurrs[nodeNo];
__syncthreads();
}
return curr;
}
__global__ void search(int val, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
node* curr = find(val);
result[0] = 0;
if(inWholeIdx < curr->num_keys)
{
if(curr->keys[inWholeIdx] == val)
result[0] = 1;
}
}
__global__ void search(int* vals, int* results, int len)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned inNodeIdx = threadIdx.x;
unsigned nodeNo = blockIdx.x;
node* curr = find(vals, len);
if(nodeNo < len)
results[nodeNo] = 0;
if(nodeNo < len && inNodeIdx < curr->num_keys)
{
if(curr->keys[inNodeIdx] == vals[nodeNo])
results[nodeNo] = 1;
}
}
__global__ void test(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(inWholeIdx== 0)
{
node* curr = root1;
/*while(!curr->is_leaf)
{
curr = curr->pointers[2];
}*/
result[0] = root1->pointers[root1->num_keys]->keys[0];
}
}
| 6a6352f1f2401361663e20b794f2aae807acc82a.cu |
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
#include "tree.cuh"
__device__ node* tempData[INPUTSIZE];
__device__ node* tempData1[INPUTSIZE];
__device__ node* root1;
__device__ node* globalCurr;
__device__ node* globalCurrs[ORDER];
__device__ node* newNode;
__device__ int globalIdx;
__device__ int tempKeys[ORDER];
__device__ node* tempPointers[ORDER];
__device__ int globalPointerIdx;
__device__ void make_node(node*& new_node)
{
new_node = (node*)malloc(sizeof(node));
new_node->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
new_node->pointers = (node**)malloc( ORDER * sizeof(node *) );
new_node->is_leaf = false;
new_node->num_keys = 0;
new_node->parent = NULL;
new_node->next = NULL;
}
__device__ void make_leaf(node*& new_node)
{
make_node(new_node);
new_node->is_leaf = true;
}
__global__ void buildLeaves(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1);
node* newNode;
if(inWholeIdx < noOfNodes)
{
make_leaf(newNode);
tempData[inWholeIdx] = newNode;
assert(tempData[inWholeIdx]);
}
}
__global__ void buildRoot(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(inWholeIdx == 0)
{
root1 = (node*)malloc(sizeof(node));
root1->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
root1->pointers = (node**)malloc( ORDER * sizeof(node *) );
root1->is_leaf = false;
root1->num_keys = 0;
root1->parent = NULL;
root1->next = NULL;
root1->keys[0] = 5;
}
}
__global__ void buildLevel(node*& root, int* input, int* result, int size, int x)
{
node** arr;
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = size / (ORDER / 2);
if(x)
arr = tempData1;
else
arr = tempData;
if(inWholeIdx < noOfNodes)
{
node* newNode;
make_node(newNode);
newNode->keys[0] = inWholeIdx;
arr[inWholeIdx] = newNode;
}
}
__global__ void fillLevel(node*& root, int* input, int* result, int size, int x)
{
node** parent;
node** children;
if(x)
{
parent = tempData1;
children = tempData;
}
else
{
parent = tempData;
children = tempData1;
}
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = size / (ORDER / 2);
unsigned inNodeIdx = inWholeIdx % (ORDER / 2);
unsigned nodeNo = inWholeIdx / (ORDER / 2 );
if(nodeNo == noOfNodes)
{
nodeNo--;
inNodeIdx = ((ORDER/2)) + inNodeIdx;
}
if(inWholeIdx < size)
{
assert(parent[nodeNo]);
assert(parent[nodeNo]->keys);
assert(children[inWholeIdx]);
parent[nodeNo]->pointers[inNodeIdx] = children[inWholeIdx];
children[inWholeIdx]->parent = parent[nodeNo];
if(inNodeIdx < (ORDER/2) - 1 || (nodeNo == noOfNodes -1 && inWholeIdx != size - 1))
{
assert(children[inWholeIdx]);
assert(children[inWholeIdx]->num_keys);
assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]);
parent[nodeNo]->keys[inNodeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1];
assert(parent[nodeNo]->keys[inNodeIdx]);
}
}
if(inNodeIdx == 0)
{
if(nodeNo < noOfNodes -1)
{
parent[nodeNo]->num_keys = (ORDER / 2) - 1;
}
else if(nodeNo == noOfNodes - 1)
parent[nodeNo]->num_keys = (size % (ORDER / 2)) + (ORDER / 2) - 1;
}
}
__global__ void fillRoot(node*& root, int* input, int* result, int size, int x)
{
node** children;
if(x)
{
children = tempData;
}
else
{
children = tempData1;
}
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned inNodeIdx = inWholeIdx % size;
if(inWholeIdx < size)
{
assert(children[inWholeIdx]);
root1->pointers[inNodeIdx] = children[inWholeIdx];
children[inWholeIdx]->parent = root1;
if(inNodeIdx < size -1 )
{
assert(children[inWholeIdx]);
assert(children[inWholeIdx]->num_keys);
assert(children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1]);
root1->keys[inWholeIdx] = children[inWholeIdx]->keys[children[inWholeIdx]->num_keys-1];
assert(root1->keys[inNodeIdx]);
}
}
if(inWholeIdx == 0)
{
root1->num_keys = size - 1;
}
}
__global__ void fillLeaves(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1);
unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1);
unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1);
if(nodeNo == noOfNodes)
{
nodeNo--;
inNodeIdx = ((ORDER/2) - 1) + inNodeIdx;
}
if(inWholeIdx < INPUTSIZE)
{
assert(tempData[nodeNo]);
assert(tempData[nodeNo]->keys);
assert(input[inWholeIdx]);
tempData[nodeNo]->keys[inNodeIdx] = input[inWholeIdx];
}
if(inNodeIdx == 0)
{
if(nodeNo < noOfNodes -1)
{
tempData[nodeNo]->next = tempData[nodeNo + 1];
tempData[nodeNo]->num_keys = ((ORDER / 2) - 1);
}
else if(nodeNo == noOfNodes - 1)
tempData[nodeNo]->num_keys = (INPUTSIZE % ((ORDER / 2) - 1)) + ((ORDER / 2) - 1);
}
}
__global__ void bulkLoad(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned noOfNodes = INPUTSIZE / ((ORDER / 2) - 1);
unsigned inNodeIdx = inWholeIdx % ((ORDER / 2) - 1);
unsigned nodeNo = inWholeIdx / ((ORDER / 2 ) -1);
if(inWholeIdx == 0)
{
root->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
root->pointers = (node**)malloc( ORDER * sizeof(node *) );
root->is_leaf = false;
root->num_keys = 0;
root->parent = NULL;
root->next = NULL;
}
node* newNode;
if(inNodeIdx == 0 && nodeNo < noOfNodes)
{
make_leaf(newNode);
tempData[nodeNo] = newNode;
assert(tempData[nodeNo]);
}
if(nodeNo == noOfNodes)
{
nodeNo--;
inNodeIdx = ((ORDER/2) - 1) + inNodeIdx;
}
__syncthreads();
if(inWholeIdx < INPUTSIZE && nodeNo < noOfNodes)
{
tempData[nodeNo]->keys[inNodeIdx] = input[inWholeIdx];
}
}
__device__ void addKey(node* curr, node* child)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
int val = child->keys[0];
if(contains(curr, val))
return;
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx < curr->num_keys)
tempKeys[inWholeIdx] = curr->keys[inWholeIdx];
if(!curr->is_leaf)
tempPointers[inWholeIdx] = curr->pointers[inWholeIdx];
}
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx == 0)
{
if(val <= curr->keys[0])
{
globalIdx = 0;
}
}
else if(inWholeIdx < curr->num_keys && inWholeIdx > 0)
{
if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx])
{
globalIdx = inWholeIdx;
}
}
else if(inWholeIdx == curr->num_keys)
{
if(val > curr->keys[curr->num_keys - 1])
{
globalIdx = curr->num_keys;
}
}
}
__syncthreads();
if(inWholeIdx >= globalIdx && inWholeIdx <= curr->num_keys)
{
if(inWholeIdx < curr->num_keys)
curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx];
if(!curr->is_leaf)
curr->pointers[inWholeIdx+1] = curr->pointers[inWholeIdx];
}
__syncthreads();
if(inWholeIdx == globalIdx)
{
if(inWholeIdx > 0)
curr->keys[globalIdx] = val;
else
curr->keys[globalIdx] = child->keys[child->num_keys]+1;
if(!curr->is_leaf)
curr->pointers[globalIdx];
}
__syncthreads();
if(inWholeIdx == 0)
curr->num_keys++;
}
__device__ void addKey(node* curr, int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(contains(curr, val))
return;
if(inWholeIdx < curr->num_keys)
tempKeys[inWholeIdx] = curr->keys[inWholeIdx];
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx == 0)
{
if(val <= curr->keys[0])
{
globalIdx = 0;
}
}
else if(inWholeIdx < curr->num_keys && inWholeIdx > 0)
{
if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx])
{
globalIdx = inWholeIdx;
}
}
else if(inWholeIdx == curr->num_keys)
{
if(val > curr->keys[curr->num_keys - 1])
{
globalIdx = curr->num_keys;
}
}
}
__syncthreads();
if(inWholeIdx >= globalIdx && inWholeIdx < curr->num_keys)
curr->keys[inWholeIdx+1] = tempKeys[inWholeIdx];
__syncthreads();
if(inWholeIdx == globalIdx)
curr->keys[globalIdx] = val;
__syncthreads();
if(inWholeIdx == 0)
curr->num_keys++;
}
__global__ void insertVal(int val)
{
node* curr = find(val);
__syncthreads();
assert(curr->num_keys < ORDER -1);
if(curr->num_keys < ORDER -1)
addKey(curr, val);
else
split(curr, val);
}
__device__ void split(node* curr, int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
node* newNodeLocal;
if(inWholeIdx == 0)
{
newNode = (node*)malloc(sizeof(node));
newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
newNode->pointers = (node**)malloc( ORDER * sizeof(node *) );
newNode->is_leaf = curr->is_leaf;
newNode->num_keys = ORDER/2;
newNode->parent = curr->parent;
newNode->next = curr->next;
curr->num_keys = ORDER/2;
curr->next = newNode;
globalPointerIdx = 0;
}
__syncthreads();
newNodeLocal = newNode;
__syncthreads();
if(inWholeIdx < (ORDER /2))
{
newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx];
}
if(!curr->is_leaf && inWholeIdx <= (ORDER /2))
{
newNode->leafs[inWholeIdx] = curr->pointers[ORDER/2 + inWholeIdx];
}
if(curr->parent->num_keys >= ORDER)
split(curr, newNode);
else
addKey(curr->parent, newNodeLocal);
}
__device__ void split(node* curr, node* child)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
node* newNodeLocal;
if(inWholeIdx == 0)
{
newNode = (node*)malloc(sizeof(node));
newNode->keys = (int*)malloc( (ORDER - 1) * sizeof(int) );
newNode->pointers = (node**)malloc( ORDER * sizeof(node *) );
newNode->is_leaf = curr->is_leaf;
newNode->num_keys = ORDER/2;
newNode->parent = curr->parent;
newNode->next = curr->next;
curr->num_keys = ORDER/2;
curr->next = newNode;
globalPointerIdx = 0;
}
__syncthreads();
newNodeLocal = newNode;
__syncthreads();
if(inWholeIdx < (ORDER /2))
{
newNode->keys[inWholeIdx] = curr->keys[ORDER/2 + inWholeIdx];
}
if(!curr->is_leaf && inWholeIdx <= (ORDER /2))
{
newNode->leafs[inWholeIdx] = curr->pointers[ORDER/2 + inWholeIdx];
}
if(curr->parent->num_keys >= ORDER)
split(curr, newNode);
else
addKey(curr->parent, newNodeLocal);
}
__device__ int contains(node* curr, int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(inWholeIdx < curr->num_keys)
{
if(curr->keys[inWholeIdx] == val)
globalIdx = 1;
}
__syncthreads();
return globalIdx;
}
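// Cooperative B+-tree descent: each thread of the block tests one key interval of the current
// node; the thread whose interval contains val publishes the child pointer through globalCurr.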
__device__ node* find(int val)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
assert(root1);
node* curr = root1;
assert(curr);
assert(!curr->is_leaf);
__syncthreads();
while(!curr->is_leaf)
{
if(inWholeIdx <= curr->num_keys)
{
if(inWholeIdx == 0)
{
assert(curr->keys[0]);
if(val <= curr->keys[0])
{
assert(curr->pointers[0]);
globalCurr = curr->pointers[0];
}
}
else if(inWholeIdx < curr->num_keys && inWholeIdx > 0)
{
assert(curr->keys[inWholeIdx-1]);
assert(curr->keys[inWholeIdx]);
if(curr->keys[inWholeIdx-1] < val && val <= curr->keys[inWholeIdx])
{
assert(curr->pointers[inWholeIdx]);
globalCurr = curr->pointers[inWholeIdx];
}
}
else if(inWholeIdx == curr->num_keys)
{
assert(curr->keys[curr->num_keys - 1]);
if(val > curr->keys[curr->num_keys - 1])
{
assert(curr->pointers[inWholeIdx]);
globalCurr = curr->pointers[inWholeIdx];
}
}
}
__syncthreads();
curr = globalCurr;
__syncthreads();
}
return curr;
}
__device__ node* find(int* values, int len)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned inNodeIdx = threadIdx.x;
unsigned nodeNo = blockIdx.x;
int val;
if(nodeNo < len)
val = values[nodeNo];
assert(root1);
node* curr = root1;
assert(curr);
assert(!curr->is_leaf);
__syncthreads();
while(!curr->is_leaf)
{
if(inNodeIdx <= curr->num_keys && nodeNo < len)
{
if(inNodeIdx == 0)
{
assert(curr->keys[0]);
if(val <= curr->keys[0])
{
assert(curr->pointers[0]);
globalCurrs[nodeNo] = curr->pointers[0];
}
}
else if(inNodeIdx < curr->num_keys && inNodeIdx > 0)
{
assert(curr->keys[inNodeIdx-1]);
assert(curr->keys[inNodeIdx]);
if(curr->keys[inNodeIdx-1] < val && val <= curr->keys[inNodeIdx])
{
assert(curr->pointers[inNodeIdx]);
globalCurrs[nodeNo] = curr->pointers[inNodeIdx];
}
}
else if(inNodeIdx == curr->num_keys)
{
assert(curr->keys[curr->num_keys - 1]);
if(val > curr->keys[curr->num_keys - 1])
{
assert(curr->pointers[inNodeIdx]);
globalCurrs[nodeNo] = curr->pointers[inNodeIdx];
}
}
}
__syncthreads();
assert(globalCurrs[nodeNo]);
curr = globalCurrs[nodeNo];
__syncthreads();
}
return curr;
}
__global__ void search(int val, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
node* curr = find(val);
result[0] = 0;
if(inWholeIdx < curr->num_keys)
{
if(curr->keys[inWholeIdx] == val)
result[0] = 1;
}
}
__global__ void search(int* vals, int* results, int len)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
unsigned inNodeIdx = threadIdx.x;
unsigned nodeNo = blockIdx.x;
node* curr = find(vals, len);
if(nodeNo < len)
results[nodeNo] = 0;
if(nodeNo < len && inNodeIdx < curr->num_keys)
{
if(curr->keys[inNodeIdx] == vals[nodeNo])
results[nodeNo] = 1;
}
}
__global__ void test(node*& root, int* input, int* result)
{
unsigned inWholeIdx = blockIdx.x*blockDim.x+threadIdx.x;
if(inWholeIdx== 0)
{
node* curr = root1;
/*while(!curr->is_leaf)
{
curr = curr->pointers[2];
}*/
result[0] = root1->pointers[root1->num_keys]->keys[0];
}
}
|
8b2a608d6b2e7dc53f8a41f1fc8081d8e564d264.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmgeelltmv.cu normal z -> d, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
dmgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y)
{
extern __shared__ double dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
double val = d_val [ num_rows * n + row ];
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs mama_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
column indices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
d_y double*
input/output vector y
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( double ); // num_vecs vectors
hipLaunchKernelGGL(( dmgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0,
m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
| 8b2a608d6b2e7dc53f8a41f1fc8081d8e564d264.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmgeelltmv.cu normal z -> d, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
dmgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y)
{
extern __shared__ double dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
double val = d_val [ num_rows * n + row ];
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
num_vecs mama_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
column indices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
d_y double*
input/output vector y
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( double ); // num_vecs vectors
dmgeelltmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>>
( m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
|