hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
5f571fa4951eea2727782e4846319a62fafe7408.hip | // !!! This is a file automatically generated by hipify!!!
//Same as GPU8 but now the shared memory is banded and the thread reads are also banded
//http://ogldev.atspace.co.uk/index.html
//Numerical Methods in Astrophysics: An Introduction by CRC Press
// nvcc nbodyGPU9.cu -o GPU9 -lglut -lm -lGLU -lGL
//To stop hit "control c" in the window you launched it from.
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 65536
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define PRINT 100
#define DAMP 0.5
#define G 1.0
#define H 1.0
#define EPSILON 0.000001
#define DT 0.001
#define STOP_TIME 0.5
#define EYE 10.0
#define FAR 50.0
// Globals
float4 p[N];
float3 v[N], f[N];
float4 *p_GPU;
float3 *v_GPU, *f_GPU;
FILE *data_file, *data_file1, *data_file2;
dim3 block, grid;
void set_initail_conditions()
{
int i,j,k,num,particles_per_side;
float position_start, temp;
float initail_seperation;
if(N%BLOCK != 0)
{
printf("\nError: Number of Particles is not a multiple of the block size \n\n");
exit(0);
}
temp = pow((float)N,1.0/3.0) + 0.99999;
particles_per_side = temp;
printf("\n cube root of N = %d \n", particles_per_side);
position_start = -(particles_per_side -1.0)/2.0;
initail_seperation = 2.0;
for(i=0; i<N; i++)
{
p[i].w = 1.0;
}
num = 0;
for(i=0; i<particles_per_side; i++)
{
for(j=0; j<particles_per_side; j++)
{
for(k=0; k<particles_per_side; k++)
{
if(N <= num) break;
p[num].x = position_start + i*initail_seperation;
p[num].y = position_start + j*initail_seperation;
p[num].z = position_start + k*initail_seperation;
v[num].x = 0.0;
v[num].y = 0.0;
v[num].z = 0.0;
num++;
}
}
}
block.x = BLOCK;
block.y = 1;
block.z = 1;
grid.x = (N-1)/block.x + 1;
grid.y = 1;
grid.z = 1;
hipMalloc( (void**)&p_GPU, N *sizeof(float4) );
hipMalloc( (void**)&v_GPU, N *sizeof(float3) );
hipMalloc( (void**)&f_GPU, N *sizeof(float3) );
}
void draw_picture()
{
int i;
glClear(GL_COLOR_BUFFER_BIT);
glClear(GL_DEPTH_BUFFER_BIT);
glColor3d(1.0,1.0,0.5);
for(i=0; i<N; i++)
{
glPushMatrix();
glTranslatef(p[i].x, p[i].y, p[i].z);
glutSolidSphere(0.1,20,20);
glPopMatrix();
}
glutSwapBuffers();
}
__device__ float3 getBodyBodyForce(float4 p0, float4 p1)
{
float3 f;
float dx = p1.x - p0.x;
float dy = p1.y - p0.y;
float dz = p1.z - p0.z;
float r2 = dx*dx + dy*dy + dz*dz + EPSILON;
float r = sqrt(r2);
float force = (G*p0.w*p1.w)/(r2) - (H*p0.w*p1.w)/(r2*r2);
f.x = force*dx/r;
f.y = force*dy/r;
f.z = force*dz/r;
return(f);
}
__global__ void getForces(float4 *pos, float3 *vel, float3 * force)
{
int k,kk;
float3 force_mag, forceSum;
float4 posMe;
__shared__ float4 shPos[BLOCK];
int id = threadIdx.x + blockDim.x*blockIdx.x;
forceSum.x = 0.0;
forceSum.y = 0.0;
forceSum.z = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
k = blockIdx.x;
for(int j=0; j < gridDim.x; j++)
{
if(gridDim.x <= k) k = k%gridDim.x;
shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*k];
__syncthreads();
kk = threadIdx.x;
#pragma unroll 32
for(int i=0; i < blockDim.x; i++)
{
if(blockDim.x <= kk) kk = kk%blockDim.x;
force_mag = getBodyBodyForce(posMe, shPos[kk]);
forceSum.x += force_mag.x;
forceSum.y += force_mag.y;
forceSum.z += force_mag.z;
kk++;
}
k++;
}
force[id].x = forceSum.x;
force[id].y = forceSum.y;
force[id].z = forceSum.z;
}
__global__ void moveBodies(float4 *pos, float3 *vel, float3 * force)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
vel[id].x += ((force[id].x-DAMP*vel[id].x)/pos[id].w)*DT;
vel[id].y += ((force[id].y-DAMP*vel[id].y)/pos[id].w)*DT;
vel[id].z += ((force[id].z-DAMP*vel[id].z)/pos[id].w)*DT;
pos[id].x += vel[id].x*DT;
pos[id].y += vel[id].y*DT;
pos[id].z += vel[id].z*DT;
}
void n_body()
{
float dt;
int tdraw = 0;
float time = 0.0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dt = DT;
hipMemcpy( p_GPU, p, N *sizeof(float4), hipMemcpyHostToDevice );
hipMemcpy( v_GPU, v, N *sizeof(float3), hipMemcpyHostToDevice );
while(time < STOP_TIME)
{
hipLaunchKernelGGL(( getForces), dim3(grid), dim3(block), 0, 0, p_GPU, v_GPU, f_GPU);
hipLaunchKernelGGL(( moveBodies), dim3(grid), dim3(block), 0, 0, p_GPU, v_GPU, f_GPU);
/*
if(tdraw == DRAW)
{
hipMemcpy( p, p_GPU, N *sizeof(float4), hipMemcpyDeviceToHost );
draw_picture();
tdraw = 0;
}
tdraw++;
*/
time += dt;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("\n\nGPU time = %3.1f milliseconds\n", elapsedTime);
hipMemcpy( p, p_GPU, N *sizeof(float4), hipMemcpyDeviceToHost );
}
void control()
{
set_initail_conditions();
draw_picture();
n_body();
draw_picture();
printf("\n DONE \n");
while(1);
}
void Display(void)
{
gluLookAt(EYE, EYE, EYE, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
control();
}
void reshape(int w, int h)
{
glViewport(0, 0, (GLsizei) w, (GLsizei) h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, FAR);
glMatrixMode(GL_MODELVIEW);
}
int main(int argc, char** argv)
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("2 Body 3D");
GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0};
GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0};
GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0};
GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0};
GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat mat_shininess[] = {10.0};
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_SMOOTH);
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular);
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_DEPTH_TEST);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
| 5f571fa4951eea2727782e4846319a62fafe7408.cu | //Same as GPU8 but now the shared memory is banded and the thread reads are also banded
//http://ogldev.atspace.co.uk/index.html
//Numerical Methods in Astrophysics: An Introduction by CRC Press
// nvcc nbodyGPU9.cu -o GPU9 -lglut -lm -lGLU -lGL
//To stop hit "control c" in the window you launched it from.
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 65536
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define PRINT 100
#define DAMP 0.5
#define G 1.0
#define H 1.0
#define EPSILON 0.000001
#define DT 0.001
#define STOP_TIME 0.5
#define EYE 10.0
#define FAR 50.0
// Globals
float4 p[N];
float3 v[N], f[N];
float4 *p_GPU;
float3 *v_GPU, *f_GPU;
FILE *data_file, *data_file1, *data_file2;
dim3 block, grid;
void set_initail_conditions()
{
int i,j,k,num,particles_per_side;
float position_start, temp;
float initail_seperation;
if(N%BLOCK != 0)
{
printf("\nError: Number of Particles is not a multiple of the block size \n\n");
exit(0);
}
temp = pow((float)N,1.0/3.0) + 0.99999;
particles_per_side = temp;
printf("\n cube root of N = %d \n", particles_per_side);
position_start = -(particles_per_side -1.0)/2.0;
initail_seperation = 2.0;
for(i=0; i<N; i++)
{
p[i].w = 1.0;
}
num = 0;
for(i=0; i<particles_per_side; i++)
{
for(j=0; j<particles_per_side; j++)
{
for(k=0; k<particles_per_side; k++)
{
if(N <= num) break;
p[num].x = position_start + i*initail_seperation;
p[num].y = position_start + j*initail_seperation;
p[num].z = position_start + k*initail_seperation;
v[num].x = 0.0;
v[num].y = 0.0;
v[num].z = 0.0;
num++;
}
}
}
block.x = BLOCK;
block.y = 1;
block.z = 1;
grid.x = (N-1)/block.x + 1;
grid.y = 1;
grid.z = 1;
cudaMalloc( (void**)&p_GPU, N *sizeof(float4) );
cudaMalloc( (void**)&v_GPU, N *sizeof(float3) );
cudaMalloc( (void**)&f_GPU, N *sizeof(float3) );
}
void draw_picture()
{
int i;
glClear(GL_COLOR_BUFFER_BIT);
glClear(GL_DEPTH_BUFFER_BIT);
glColor3d(1.0,1.0,0.5);
for(i=0; i<N; i++)
{
glPushMatrix();
glTranslatef(p[i].x, p[i].y, p[i].z);
glutSolidSphere(0.1,20,20);
glPopMatrix();
}
glutSwapBuffers();
}
__device__ float3 getBodyBodyForce(float4 p0, float4 p1)
{
float3 f;
float dx = p1.x - p0.x;
float dy = p1.y - p0.y;
float dz = p1.z - p0.z;
float r2 = dx*dx + dy*dy + dz*dz + EPSILON;
float r = sqrt(r2);
float force = (G*p0.w*p1.w)/(r2) - (H*p0.w*p1.w)/(r2*r2);
f.x = force*dx/r;
f.y = force*dy/r;
f.z = force*dz/r;
return(f);
}
__global__ void getForces(float4 *pos, float3 *vel, float3 * force)
{
int k,kk;
float3 force_mag, forceSum;
float4 posMe;
__shared__ float4 shPos[BLOCK];
int id = threadIdx.x + blockDim.x*blockIdx.x;
forceSum.x = 0.0;
forceSum.y = 0.0;
forceSum.z = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
k = blockIdx.x;
for(int j=0; j < gridDim.x; j++)
{
if(gridDim.x <= k) k = k%gridDim.x;
shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*k];
__syncthreads();
kk = threadIdx.x;
#pragma unroll 32
for(int i=0; i < blockDim.x; i++)
{
if(blockDim.x <= kk) kk = kk%blockDim.x;
force_mag = getBodyBodyForce(posMe, shPos[kk]);
forceSum.x += force_mag.x;
forceSum.y += force_mag.y;
forceSum.z += force_mag.z;
kk++;
}
k++;
}
force[id].x = forceSum.x;
force[id].y = forceSum.y;
force[id].z = forceSum.z;
}
__global__ void moveBodies(float4 *pos, float3 *vel, float3 * force)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
vel[id].x += ((force[id].x-DAMP*vel[id].x)/pos[id].w)*DT;
vel[id].y += ((force[id].y-DAMP*vel[id].y)/pos[id].w)*DT;
vel[id].z += ((force[id].z-DAMP*vel[id].z)/pos[id].w)*DT;
pos[id].x += vel[id].x*DT;
pos[id].y += vel[id].y*DT;
pos[id].z += vel[id].z*DT;
}
void n_body()
{
float dt;
int tdraw = 0;
float time = 0.0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dt = DT;
cudaMemcpy( p_GPU, p, N *sizeof(float4), cudaMemcpyHostToDevice );
cudaMemcpy( v_GPU, v, N *sizeof(float3), cudaMemcpyHostToDevice );
while(time < STOP_TIME)
{
getForces<<<grid, block>>>(p_GPU, v_GPU, f_GPU);
moveBodies<<<grid, block>>>(p_GPU, v_GPU, f_GPU);
/*
if(tdraw == DRAW)
{
cudaMemcpy( p, p_GPU, N *sizeof(float4), cudaMemcpyDeviceToHost );
draw_picture();
tdraw = 0;
}
tdraw++;
*/
time += dt;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\n\nGPU time = %3.1f milliseconds\n", elapsedTime);
cudaMemcpy( p, p_GPU, N *sizeof(float4), cudaMemcpyDeviceToHost );
}
void control()
{
set_initail_conditions();
draw_picture();
n_body();
draw_picture();
printf("\n DONE \n");
while(1);
}
void Display(void)
{
gluLookAt(EYE, EYE, EYE, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
control();
}
void reshape(int w, int h)
{
glViewport(0, 0, (GLsizei) w, (GLsizei) h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, FAR);
glMatrixMode(GL_MODELVIEW);
}
int main(int argc, char** argv)
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("2 Body 3D");
GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0};
GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0};
GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0};
GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0};
GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat mat_shininess[] = {10.0};
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_SMOOTH);
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular);
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_DEPTH_TEST);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
|
bd05edac4b9530d76524dd797b772d010e654864.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <string>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "loadSaveImage.h"
#include <stdio.h>
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
//simple cross correlation kernel copied from Mike's IPython Notebook
__global__ void naive_normalized_cross_correlation(
float* d_response,
unsigned char* d_original,
unsigned char* d_template,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_height,
int template_half_width,
int template_width,
int template_size,
float template_mean
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int knx = template_width;
int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x, (blockIdx.y * blockDim.y) + threadIdx.y);
int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny)
{
//
// compute image mean
//
float image_sum = 0.0f;
for (int y = -template_half_height; y <= template_half_height; y++)
{
for (int x = -template_half_width; x <= template_half_width; x++)
{
int2 image_offset_index_2d = make_int2(image_index_2d.x + x, image_index_2d.y + y);
int2 image_offset_index_2d_clamped = make_int2(MIN(nx - 1, MAX(0, image_offset_index_2d.x)), MIN(ny - 1, MAX(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[image_offset_index_1d_clamped];
image_sum += (float)image_offset_value;
}
}
float image_mean = image_sum / (float)template_size;
//
// compute sums
//
float sum_of_image_template_diff_products = 0.0f;
float sum_of_squared_image_diffs = 0.0f;
float sum_of_squared_template_diffs = 0.0f;
for (int y = -template_half_height; y <= template_half_height; y++)
{
for (int x = -template_half_width; x <= template_half_width; x++)
{
int2 image_offset_index_2d = make_int2(image_index_2d.x + x, image_index_2d.y + y);
int2 image_offset_index_2d_clamped = make_int2(MIN(nx - 1, MAX(0, image_offset_index_2d.x)), MIN(ny - 1, MAX(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[image_offset_index_1d_clamped];
float image_diff = (float)image_offset_value - image_mean;
int2 template_index_2d = make_int2(x + template_half_width, y + template_half_height);
int template_index_1d = (knx * template_index_2d.y) + template_index_2d.x;
unsigned char template_value = d_template[template_index_1d];
float template_diff = template_value - template_mean;
float image_template_diff_product = image_offset_value * template_diff;
float squared_image_diff = image_diff * image_diff;
float squared_template_diff = template_diff * template_diff;
sum_of_image_template_diff_products += image_template_diff_product;
sum_of_squared_image_diffs += squared_image_diff;
sum_of_squared_template_diffs += squared_template_diff;
}
}
//
// compute final result
//
float result_value = 0.0f;
if (sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0)
{
result_value = sum_of_image_template_diff_products / sqrt(sum_of_squared_image_diffs * sum_of_squared_template_diffs);
}
d_response[image_index_1d] = result_value;
}
}
__global__ void remove_redness_from_coordinates(
const unsigned int* d_coordinates,
unsigned char* d_r,
unsigned char* d_b,
unsigned char* d_g,
unsigned char* d_r_output,
int num_coordinates,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_half_width
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgSize = num_pixels_x * num_pixels_y;
if (global_index_1d < num_coordinates)
{
unsigned int image_index_1d = d_coordinates[imgSize - global_index_1d - 1];
ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x, image_index_1d / num_pixels_x);
for (int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++)
{
for (int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++)
{
int2 image_offset_index_2d = make_int2(x, y);
int2 image_offset_index_2d_clamped = make_int2(MIN(nx - 1, MAX(0, image_offset_index_2d.x)), MIN(ny - 1, MAX(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;
unsigned char g_value = d_g[image_offset_index_1d_clamped];
unsigned char b_value = d_b[image_offset_index_1d_clamped];
unsigned int gb_average = (g_value + b_value) / 2;
d_r_output[image_offset_index_1d_clamped] = (unsigned char)gb_average;
}
}
}
}
struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> > {
__host__ __device__
thrust::tuple<unsigned char, unsigned char, unsigned char> operator()(uchar4 pixel) {
return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
}
};
struct combineChannels : thrust::unary_function<thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<unsigned char, unsigned char, unsigned char> t) {
return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255);
}
};
struct combineResponses : thrust::unary_function<float, thrust::tuple<float, float, float> > {
__host__ __device__
float operator()(thrust::tuple<float, float, float> t) {
return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t);
}
};
//we need to save the input so we can remove the redeye for the output
static thrust::device_vector<unsigned char> d_red;
static thrust::device_vector<unsigned char> d_blue;
static thrust::device_vector<unsigned char> d_green;
static size_t numRowsImg;
static size_t numColsImg;
static size_t templateHalfWidth;
static size_t templateHalfHeight;
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
void preProcess(unsigned int** inputVals,
unsigned int** inputPos,
unsigned int** outputVals,
unsigned int** outputPos,
size_t& numElem,
const std::string& filename,
const std::string& templateFilename) {
//make sure the context initializes ok
checkCudaErrors(hipFree(0));
uchar4* inImg;
uchar4* eyeTemplate;
size_t numRowsTemplate, numColsTemplate;
loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg);
loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate, &numColsTemplate);
templateHalfWidth = (numColsTemplate - 1) / 2;
templateHalfHeight = (numRowsTemplate - 1) / 2;
//we need to split each image into its separate channels
//use thrust to demonstrate basic uses
numElem = numRowsImg * numColsImg;
size_t templateSize = numRowsTemplate * numColsTemplate;
thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg);
thrust::device_vector<uchar4> d_Template(eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate);
d_red.resize(numElem);
d_blue.resize(numElem);
d_green.resize(numElem);
thrust::device_vector<unsigned char> d_red_template(templateSize);
thrust::device_vector<unsigned char> d_blue_template(templateSize);
thrust::device_vector<unsigned char> d_green_template(templateSize);
//split the image
thrust::transform(d_Img.begin(), d_Img.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red.begin(),
d_blue.begin(),
d_green.begin())),
splitChannels());
//split the template
thrust::transform(d_Template.begin(), d_Template.end(),
thrust::make_zip_iterator(thrust::make_tuple(d_red_template.begin(),
d_blue_template.begin(),
d_green_template.begin())),
splitChannels());
thrust::device_vector<float> d_red_response(numElem);
thrust::device_vector<float> d_blue_response(numElem);
thrust::device_vector<float> d_green_response(numElem);
//need to compute the mean for each template channel
unsigned int r_sum = thrust::reduce(d_red_template.begin(), d_red_template.end(), 0);
unsigned int b_sum = thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0);
unsigned int g_sum = thrust::reduce(d_green_template.begin(), d_green_template.end(), 0);
float r_mean = (double)r_sum / templateSize;
float b_mean = (double)b_sum / templateSize;
float g_mean = (double)g_sum / templateSize;
const dim3 blockSize(32, 8, 1);
const dim3 gridSize((numColsImg + blockSize.x - 1) / blockSize.x, (numRowsImg + blockSize.y - 1) / blockSize.y, 1);
//now compute the cross-correlations for each channel
naive_normalized_cross_correlation << <gridSize, blockSize >> > (thrust::raw_pointer_cast(d_red_response.data()),
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_red_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, r_mean);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
naive_normalized_cross_correlation << <gridSize, blockSize >> > (thrust::raw_pointer_cast(d_blue_response.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_blue_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, b_mean);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
naive_normalized_cross_correlation << <gridSize, blockSize >> > (thrust::raw_pointer_cast(d_green_response.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_green_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, g_mean);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//generate combined response - multiply all channels together
thrust::device_vector<float> d_combined_response(numElem);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.begin(),
d_blue_response.begin(),
d_green_response.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.end(),
d_blue_response.end(),
d_green_response.end())),
d_combined_response.begin(),
combineResponses());
//find max/min of response
typedef thrust::device_vector<float>::iterator floatIt;
thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(d_combined_response.begin(), d_combined_response.end());
float bias = *minmax.first;
//we need to make all the numbers positive so that the students can sort them without any bit twiddling
thrust::transform(d_combined_response.begin(), d_combined_response.end(), thrust::make_constant_iterator(-bias),
d_combined_response.begin(), thrust::plus<float>());
//now we need to create the 1-D coordinates that will be attached to the keys
thrust::device_vector<unsigned int> coords(numElem);
thrust::sequence(coords.begin(), coords.end()); //[0, ..., numElem - 1]
//allocate memory for output and copy since our device vectors will go out of scope
//and be deleted
checkCudaErrors(hipMalloc(inputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(inputPos, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(outputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(outputPos, sizeof(unsigned int) * numElem));
hipMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()), sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice);
hipMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()), sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice);
checkCudaErrors(hipMemset(*outputVals, 0, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMemset(*outputPos, 0, sizeof(unsigned int) * numElem));
}
void postProcess(const unsigned int* const outputVals,
const unsigned int* const outputPos,
const size_t numElems,
const std::string& output_file) {
thrust::device_vector<unsigned char> d_output_red = d_red;
const dim3 blockSize(256, 1, 1);
const dim3 gridSize((40 + blockSize.x - 1) / blockSize.x, 1, 1);
remove_redness_from_coordinates << <gridSize, blockSize >> > (outputPos,
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_output_red.data()),
40,
numRowsImg, numColsImg,
9, 9);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//combine the new red channel with original blue and green for output
thrust::device_vector<uchar4> d_outputImg(numElems);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(d_output_red.begin(), d_blue.begin(), d_green.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_output_red.end(), d_blue.end(), d_green.end())), d_outputImg.begin(),
combineChannels());
thrust::host_vector<uchar4> h_Img = d_outputImg;
saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file);
//Clear the global vectors otherwise something goes wrong trying to free them
d_red.clear(); d_red.shrink_to_fit();
d_blue.clear(); d_blue.shrink_to_fit();
d_green.clear(); d_green.shrink_to_fit();
}
| bd05edac4b9530d76524dd797b772d010e654864.cu | #include "utils.h"
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime.h>
#include <string>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "loadSaveImage.h"
#include <stdio.h>
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
//simple cross correlation kernel copied from Mike's IPython Notebook
__global__ void naive_normalized_cross_correlation(
float* d_response,
unsigned char* d_original,
unsigned char* d_template,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_height,
int template_half_width,
int template_width,
int template_size,
float template_mean
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int knx = template_width;
int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x, (blockIdx.y * blockDim.y) + threadIdx.y);
int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny)
{
//
// compute image mean
//
float image_sum = 0.0f;
for (int y = -template_half_height; y <= template_half_height; y++)
{
for (int x = -template_half_width; x <= template_half_width; x++)
{
int2 image_offset_index_2d = make_int2(image_index_2d.x + x, image_index_2d.y + y);
int2 image_offset_index_2d_clamped = make_int2(MIN(nx - 1, MAX(0, image_offset_index_2d.x)), MIN(ny - 1, MAX(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[image_offset_index_1d_clamped];
image_sum += (float)image_offset_value;
}
}
float image_mean = image_sum / (float)template_size;
//
// compute sums
//
float sum_of_image_template_diff_products = 0.0f;
float sum_of_squared_image_diffs = 0.0f;
float sum_of_squared_template_diffs = 0.0f;
for (int y = -template_half_height; y <= template_half_height; y++)
{
for (int x = -template_half_width; x <= template_half_width; x++)
{
int2 image_offset_index_2d = make_int2(image_index_2d.x + x, image_index_2d.y + y);
int2 image_offset_index_2d_clamped = make_int2(MIN(nx - 1, MAX(0, image_offset_index_2d.x)), MIN(ny - 1, MAX(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;
unsigned char image_offset_value = d_original[image_offset_index_1d_clamped];
float image_diff = (float)image_offset_value - image_mean;
int2 template_index_2d = make_int2(x + template_half_width, y + template_half_height);
int template_index_1d = (knx * template_index_2d.y) + template_index_2d.x;
unsigned char template_value = d_template[template_index_1d];
float template_diff = template_value - template_mean;
float image_template_diff_product = image_offset_value * template_diff;
float squared_image_diff = image_diff * image_diff;
float squared_template_diff = template_diff * template_diff;
sum_of_image_template_diff_products += image_template_diff_product;
sum_of_squared_image_diffs += squared_image_diff;
sum_of_squared_template_diffs += squared_template_diff;
}
}
//
// compute final result
//
float result_value = 0.0f;
if (sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0)
{
result_value = sum_of_image_template_diff_products / sqrt(sum_of_squared_image_diffs * sum_of_squared_template_diffs);
}
d_response[image_index_1d] = result_value;
}
}
__global__ void remove_redness_from_coordinates(
const unsigned int* d_coordinates,
unsigned char* d_r,
unsigned char* d_b,
unsigned char* d_g,
unsigned char* d_r_output,
int num_coordinates,
int num_pixels_y,
int num_pixels_x,
int template_half_height,
int template_half_width
)
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgSize = num_pixels_x * num_pixels_y;
if (global_index_1d < num_coordinates)
{
unsigned int image_index_1d = d_coordinates[imgSize - global_index_1d - 1];
ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x, image_index_1d / num_pixels_x);
for (int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++)
{
for (int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++)
{
int2 image_offset_index_2d = make_int2(x, y);
int2 image_offset_index_2d_clamped = make_int2(MIN(nx - 1, MAX(0, image_offset_index_2d.x)), MIN(ny - 1, MAX(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;
unsigned char g_value = d_g[image_offset_index_1d_clamped];
unsigned char b_value = d_b[image_offset_index_1d_clamped];
unsigned int gb_average = (g_value + b_value) / 2;
d_r_output[image_offset_index_1d_clamped] = (unsigned char)gb_average;
}
}
}
}
struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> > {
__host__ __device__
thrust::tuple<unsigned char, unsigned char, unsigned char> operator()(uchar4 pixel) {
return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
}
};
struct combineChannels : thrust::unary_function<thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<unsigned char, unsigned char, unsigned char> t) {
return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255);
}
};
struct combineResponses : thrust::unary_function<float, thrust::tuple<float, float, float> > {
__host__ __device__
float operator()(thrust::tuple<float, float, float> t) {
return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t);
}
};
//we need to save the input so we can remove the redeye for the output
static thrust::device_vector<unsigned char> d_red;
static thrust::device_vector<unsigned char> d_blue;
static thrust::device_vector<unsigned char> d_green;
static size_t numRowsImg;
static size_t numColsImg;
static size_t templateHalfWidth;
static size_t templateHalfHeight;
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
void preProcess(unsigned int** inputVals,
unsigned int** inputPos,
unsigned int** outputVals,
unsigned int** outputPos,
size_t& numElem,
const std::string& filename,
const std::string& templateFilename) {
//make sure the context initializes ok
checkCudaErrors(cudaFree(0));
uchar4* inImg;
uchar4* eyeTemplate;
size_t numRowsTemplate, numColsTemplate;
loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg);
loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate, &numColsTemplate);
templateHalfWidth = (numColsTemplate - 1) / 2;
templateHalfHeight = (numRowsTemplate - 1) / 2;
//we need to split each image into its separate channels
//use thrust to demonstrate basic uses
numElem = numRowsImg * numColsImg;
size_t templateSize = numRowsTemplate * numColsTemplate;
thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg);
thrust::device_vector<uchar4> d_Template(eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate);
d_red.resize(numElem);
d_blue.resize(numElem);
d_green.resize(numElem);
thrust::device_vector<unsigned char> d_red_template(templateSize);
thrust::device_vector<unsigned char> d_blue_template(templateSize);
thrust::device_vector<unsigned char> d_green_template(templateSize);
//split the image
thrust::transform(d_Img.begin(), d_Img.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red.begin(),
d_blue.begin(),
d_green.begin())),
splitChannels());
//split the template
thrust::transform(d_Template.begin(), d_Template.end(),
thrust::make_zip_iterator(thrust::make_tuple(d_red_template.begin(),
d_blue_template.begin(),
d_green_template.begin())),
splitChannels());
thrust::device_vector<float> d_red_response(numElem);
thrust::device_vector<float> d_blue_response(numElem);
thrust::device_vector<float> d_green_response(numElem);
//need to compute the mean for each template channel
unsigned int r_sum = thrust::reduce(d_red_template.begin(), d_red_template.end(), 0);
unsigned int b_sum = thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0);
unsigned int g_sum = thrust::reduce(d_green_template.begin(), d_green_template.end(), 0);
float r_mean = (double)r_sum / templateSize;
float b_mean = (double)b_sum / templateSize;
float g_mean = (double)g_sum / templateSize;
const dim3 blockSize(32, 8, 1);
const dim3 gridSize((numColsImg + blockSize.x - 1) / blockSize.x, (numRowsImg + blockSize.y - 1) / blockSize.y, 1);
//now compute the cross-correlations for each channel
naive_normalized_cross_correlation << <gridSize, blockSize >> > (thrust::raw_pointer_cast(d_red_response.data()),
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_red_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, r_mean);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
naive_normalized_cross_correlation << <gridSize, blockSize >> > (thrust::raw_pointer_cast(d_blue_response.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_blue_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, b_mean);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
naive_normalized_cross_correlation << <gridSize, blockSize >> > (thrust::raw_pointer_cast(d_green_response.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_green_template.data()),
numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate,
templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, g_mean);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//generate combined response - multiply all channels together
thrust::device_vector<float> d_combined_response(numElem);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.begin(),
d_blue_response.begin(),
d_green_response.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.end(),
d_blue_response.end(),
d_green_response.end())),
d_combined_response.begin(),
combineResponses());
//find max/min of response
typedef thrust::device_vector<float>::iterator floatIt;
thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(d_combined_response.begin(), d_combined_response.end());
float bias = *minmax.first;
//we need to make all the numbers positive so that the students can sort them without any bit twiddling
thrust::transform(d_combined_response.begin(), d_combined_response.end(), thrust::make_constant_iterator(-bias),
d_combined_response.begin(), thrust::plus<float>());
//now we need to create the 1-D coordinates that will be attached to the keys
thrust::device_vector<unsigned int> coords(numElem);
thrust::sequence(coords.begin(), coords.end()); //[0, ..., numElem - 1]
//allocate memory for output and copy since our device vectors will go out of scope
//and be deleted
checkCudaErrors(cudaMalloc(inputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(inputPos, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(outputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(outputPos, sizeof(unsigned int) * numElem));
cudaMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()), sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice);
cudaMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()), sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaMemset(*outputVals, 0, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMemset(*outputPos, 0, sizeof(unsigned int) * numElem));
}
void postProcess(const unsigned int* const outputVals,
const unsigned int* const outputPos,
const size_t numElems,
const std::string& output_file) {
thrust::device_vector<unsigned char> d_output_red = d_red;
const dim3 blockSize(256, 1, 1);
const dim3 gridSize((40 + blockSize.x - 1) / blockSize.x, 1, 1);
remove_redness_from_coordinates << <gridSize, blockSize >> > (outputPos,
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_output_red.data()),
40,
numRowsImg, numColsImg,
9, 9);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//combine the new red channel with original blue and green for output
thrust::device_vector<uchar4> d_outputImg(numElems);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(d_output_red.begin(), d_blue.begin(), d_green.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_output_red.end(), d_blue.end(), d_green.end())), d_outputImg.begin(),
combineChannels());
thrust::host_vector<uchar4> h_Img = d_outputImg;
saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file);
//Clear the global vectors otherwise something goes wrong trying to free them
d_red.clear(); d_red.shrink_to_fit();
d_blue.clear(); d_blue.shrink_to_fit();
d_green.clear(); d_green.shrink_to_fit();
}
|
0f27f7680034c725bbdc1ce77286261499253c42.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define NUM_THREADS 1024
#define NUM_BLOCKS 32768
#define NUM_VALUES NUM_THREADS*NUM_BLOCKS
void InitV(int *v);
void bitonic_sort(int *dev_values);
void test(int *v);
__global__ void bitonic_sort_step(int *dev_values, int j, int k){
int i, ixj; // Sorting partners: i and ixj
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
if ((ixj) > i) {
if ((i & k) == 0) {
if (dev_values[i] > dev_values[ixj]) {
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
if (dev_values[i] < dev_values[ixj]) {
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
void bitonic_sort(int *dev_values){
dim3 numBlocks(NUM_BLOCKS, 1);
dim3 numThreads(NUM_THREADS, 1);
int j, k;
for (k = 2; k <= NUM_VALUES; k = 2 * k) {
for (j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( bitonic_sort_step), dim3(numBlocks), dim3(numThreads), 0, 0, dev_values, j, k);
}
}
}
int main(){
srand(time(NULL));
int *host_values, *dev_values, *original_values;
float TiempoTotal, TiempoKernel;
hipEvent_t E0, E1, E2, E3;
hipEventCreate(&E0);
hipEventCreate(&E1);
hipEventCreate(&E2);
hipEventCreate(&E3);
unsigned int numBytes = NUM_VALUES * sizeof(int);
//allocate memory on the host
//host_values = (int*) malloc(numBytes);
//original_values = (int*) malloc(numBytes);
hipHostMalloc( &host_values, numBytes);
hipHostMalloc( &original_values, numBytes);
//initialize the vector
InitV(original_values);
hipEventRecord(E0, 0);
hipEventSynchronize(E0);
memcpy(host_values, original_values, numBytes);
//allocate memory on the device
hipMalloc((int**)&dev_values, numBytes);
hipEventRecord(E1, 0);
hipEventSynchronize(E1);
//Copy data from host to device
hipMemcpy(dev_values, host_values, numBytes, hipMemcpyHostToDevice);
//Run the kernel
bitonic_sort(dev_values);
hipEventRecord(E2, 0);
hipEventSynchronize(E2);
//Get the result back on the host
hipMemcpy( host_values, dev_values, numBytes, hipMemcpyDeviceToHost);
//run a test to check that the ordering is correct
test(host_values);
//Free device and host memory
hipFree(dev_values);
hipHostFree(original_values);
hipHostFree(host_values);
hipDeviceSynchronize();
hipEventRecord(E3, 0);
hipEventSynchronize(E3);
hipEventElapsedTime(&TiempoTotal, E0, E3);
hipEventElapsedTime(&TiempoKernel, E1, E2);
printf("num Threads: %d\n", NUM_THREADS);
printf("num Blocs: %d\n", NUM_BLOCKS);
printf("Tiempo global: %4.6f milseg\n", TiempoTotal);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
hipEventDestroy(E0);
hipEventDestroy(E1);
hipEventDestroy(E2);
hipEventDestroy(E3);
}
void InitV(int *v) {
int i;
for (i = 0; i < NUM_VALUES; i++) {
v[i] = rand();
}
}
void test (int *v) {
int i;
int val = v[0];
for (i = 1; i < NUM_VALUES; ++i) {
if (v[i] < val) {
printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
printf("TEST FAIL\n\n");
return;
} else {
val = v[i];
}
}
printf("TEST OK\n\n");
}
| 0f27f7680034c725bbdc1ce77286261499253c42.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#define NUM_THREADS 1024
#define NUM_BLOCKS 32768
#define NUM_VALUES NUM_THREADS*NUM_BLOCKS
void InitV(int *v);
void bitonic_sort(int *dev_values);
void test(int *v);
__global__ void bitonic_sort_step(int *dev_values, int j, int k){
int i, ixj; // Sorting partners: i and ixj
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
if ((ixj) > i) {
if ((i & k) == 0) {
if (dev_values[i] > dev_values[ixj]) {
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
if (dev_values[i] < dev_values[ixj]) {
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
void bitonic_sort(int *dev_values){
dim3 numBlocks(NUM_BLOCKS, 1);
dim3 numThreads(NUM_THREADS, 1);
int j, k;
for (k = 2; k <= NUM_VALUES; k = 2 * k) {
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step<<<numBlocks, numThreads>>>(dev_values, j, k);
}
}
}
int main(){
srand(time(NULL));
int *host_values, *dev_values, *original_values;
float TiempoTotal, TiempoKernel;
cudaEvent_t E0, E1, E2, E3;
cudaEventCreate(&E0);
cudaEventCreate(&E1);
cudaEventCreate(&E2);
cudaEventCreate(&E3);
unsigned int numBytes = NUM_VALUES * sizeof(int);
//allocate memory on the host
//host_values = (int*) malloc(numBytes);
//original_values = (int*) malloc(numBytes);
cudaMallocHost( &host_values, numBytes);
cudaMallocHost( &original_values, numBytes);
//initialize the vector
InitV(original_values);
cudaEventRecord(E0, 0);
cudaEventSynchronize(E0);
memcpy(host_values, original_values, numBytes);
//allocate memory on the device
cudaMalloc((int**)&dev_values, numBytes);
cudaEventRecord(E1, 0);
cudaEventSynchronize(E1);
//Copy data from host to device
cudaMemcpy(dev_values, host_values, numBytes, cudaMemcpyHostToDevice);
//Run the kernel
bitonic_sort(dev_values);
cudaEventRecord(E2, 0);
cudaEventSynchronize(E2);
//Get the result back on the host
cudaMemcpy( host_values, dev_values, numBytes, cudaMemcpyDeviceToHost);
//run a test to check that the ordering is correct
test(host_values);
//Free device and host memory
cudaFree(dev_values);
cudaFreeHost(original_values);
cudaFreeHost(host_values);
cudaDeviceSynchronize();
cudaEventRecord(E3, 0);
cudaEventSynchronize(E3);
cudaEventElapsedTime(&TiempoTotal, E0, E3);
cudaEventElapsedTime(&TiempoKernel, E1, E2);
printf("num Threads: %d\n", NUM_THREADS);
printf("num Blocs: %d\n", NUM_BLOCKS);
printf("Tiempo global: %4.6f milseg\n", TiempoTotal);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
cudaEventDestroy(E0);
cudaEventDestroy(E1);
cudaEventDestroy(E2);
cudaEventDestroy(E3);
}
void InitV(int *v) {
int i;
for (i = 0; i < NUM_VALUES; i++) {
v[i] = rand();
}
}
void test (int *v) {
int i;
int val = v[0];
for (i = 1; i < NUM_VALUES; ++i) {
if (v[i] < val) {
printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
printf("TEST FAIL\n\n");
return;
} else {
val = v[i];
}
}
printf("TEST OK\n\n");
}
|
e64c3fc2a457b11225457a329cf97ae77dd9ade0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixColour (float *a, float *b, int n){
int j= blockDim.x * blockIdx.x + threadIdx.x;
printf("Block = %d ; Thread = %d \n",blockIdx.x+1, threadIdx.x+1);
// if(j<n){
// for (int i=0; i<n; i++){
//// printf("Block = %d ; Thread = %d ; i = %d ; %f\n",blockIdx.x+1, j+1,i+1,b[i]);
// if (a[j*n+i]==1){
// if (b[j]==b[i]){
// b[j]=-1;
// break;
// }
// }
// }
// }
int *colour = new int[n];
memset(colour, 0, n*sizeof(int));
if (j<n){
for (int i=0; i<n; i++){
//printf("Thread = %d ; i = %d ; %f\n",j+1,i+1,b[i]);
printf("Block = %d ; Thread = %d First For i = %d\n",blockIdx.x+1, threadIdx.x+1, i+1);
if (a[j*n+i]==1 && b[i]!=-1){
colour[(int)b[i]]=1;
}
// if (i==j){
// //atomicAdd(&b[i],1.0f);
// b[i]+=1.0f;
// }
}
for (int i=0; i<n; i++){
if (colour[i]==0){
printf("Block = %d ; Thread = %d Second For i = %d\n",blockIdx.x+1, threadIdx.x+1, i+1);
atomicAdd(&b[j],(float)i-b[j]);
break;
}
}
// for (int i=0; i<n; i++){
// printf("Third Block = %d ; ThreadId = %d ; Thread = %d ; i = %d ; %f\n",blockIdx.x+1, threadIdx.x+1, j+1,i+1,b[i]);
// }
}
// printf("I am thread no: %d from blocknumber: %d\n", threadIdx.x, blockIdx.x);
//b[j] = j+1;
} | e64c3fc2a457b11225457a329cf97ae77dd9ade0.cu | #include "includes.h"
__global__ void matrixColour (float *a, float *b, int n){
int j= blockDim.x * blockIdx.x + threadIdx.x;
printf("Block = %d ; Thread = %d \n",blockIdx.x+1, threadIdx.x+1);
// if(j<n){
// for (int i=0; i<n; i++){
//// printf("Block = %d ; Thread = %d ; i = %d ; %f\n",blockIdx.x+1, j+1,i+1,b[i]);
// if (a[j*n+i]==1){
// if (b[j]==b[i]){
// b[j]=-1;
// break;
// }
// }
// }
// }
int *colour = new int[n];
memset(colour, 0, n*sizeof(int));
if (j<n){
for (int i=0; i<n; i++){
//printf("Thread = %d ; i = %d ; %f\n",j+1,i+1,b[i]);
printf("Block = %d ; Thread = %d First For i = %d\n",blockIdx.x+1, threadIdx.x+1, i+1);
if (a[j*n+i]==1 && b[i]!=-1){
colour[(int)b[i]]=1;
}
// if (i==j){
// //atomicAdd(&b[i],1.0f);
// b[i]+=1.0f;
// }
}
for (int i=0; i<n; i++){
if (colour[i]==0){
printf("Block = %d ; Thread = %d Second For i = %d\n",blockIdx.x+1, threadIdx.x+1, i+1);
atomicAdd(&b[j],(float)i-b[j]);
break;
}
}
// for (int i=0; i<n; i++){
// printf("Third Block = %d ; ThreadId = %d ; Thread = %d ; i = %d ; %f\n",blockIdx.x+1, threadIdx.x+1, j+1,i+1,b[i]);
// }
}
// printf("I am thread no: %d from blocknumber: %d\n", threadIdx.x, blockIdx.x);
//b[j] = j+1;
} |
9657f57d3e83a3233492291954153225eb109ea9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CUDA_COMPILED__
#define __CUDA_COMPILED__
#endif
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cutil_inline.h>
using namespace std;
//#define THREADS_PER_BLOCK 512
#define MAX_TYPE_SIZE 512
//#define N 512
#define NOMINAL 1
#define ENTER 2
#define REAL 3
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
struct __align__ (8) ClassifierInfo {
int numAtt;
int predictedClass;
};
unsigned char *d_chromosome;
float *d_predicates;
int *d_whichAtt;
ClassifierInfo *d_info;
int *d_offsetPred;
float *d_realValues;
int *d_realClasses;
__constant__ int c_typeOfAttribute[MAX_TYPE_SIZE];
__constant__ int c_numAtts[1];
__constant__ int c_offsetAttribute[MAX_TYPE_SIZE];
int threadsPerBlock;
//template < unsigned int blockSize >
__global__ void reduction6(int *entrada, int * last, int totalObjects,
int arraySize, int a);
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchMixed(int insPerRun,
int classPerRun,
int maxNumAtt, int ruleSize, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
int * offsetPredicates,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct);
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchReal(int insPerRun,
int classPerRun,
int maxNumAtt, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct);
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchNominal(int insPerRun,
int classPerRun,
int ruleSize,
unsigned char *chromosome,
float *realValues,
int *realClasses,
int *numIns);
inline int setDeviceFirstTime() {
// Determine de number of cuda enabled devices available.
int deviceCount;
hipGetDeviceCount(&deviceCount);
int deviceSelected;
// If there is a device available we iterate over the device to select the device with the larger capacity
if (deviceCount > 0) {
int device;
int maxCapacityFound = 0;
hipDeviceProp_t prop;
for (device = 0; device < deviceCount; device++) {
hipGetDeviceProperties(&prop, device);
if (prop.totalGlobalMem > maxCapacityFound) {
maxCapacityFound = prop.totalGlobalMem;
deviceSelected = device;
// A device with a larger capacity was found
}
}
return deviceSelected;
} else {
fprintf(stderr, "There are no CUDA-enabled devices available");
exit(1);
}
}
extern "C" void setDevice(size_t * memDevice, size_t * memPerBlock, int * tBlock,
int * deviceSelected, double percent) {
// Sets 256MB as the global memory in case of using device emulation.
#ifdef __DEVICE_EMULATION__
*memDevice = 268435456;
*memPerBlock = 16384;
*tBlock = 512;
threadsPerBlock = 512;
#endif
#ifndef __DEVICE_EMULATION__
hipDeviceProp_t prop;
if (*deviceSelected == -1) {
*deviceSelected = setDeviceFirstTime();
}
if (*memDevice == 0 || *memPerBlock == 0 && *tBlock == 0) {
// If a device was already set previously we collect the data
hipSetDevice(*deviceSelected);
hipGetDeviceProperties(&prop, *deviceSelected);
// if(*deviceSelected == 0) {
//double percent = cm.getParameter(PERC_DEVICE_MEM);
*memDevice = (size_t) floor(percent*prop.totalGlobalMem);
fprintf(stdout, "Using %f of device memory %lld\n", percent, prop.totalGlobalMem);
// } else {
// *memDevice = prop.totalGlobalMem;
// }
*memPerBlock = prop.sharedMemPerBlock;
*tBlock = prop.maxThreadsPerBlock;
threadsPerBlock = prop.maxThreadsPerBlock;
//threadsPerBlock = 512;
// *memDevice = 1024*12;
fprintf(stdout, "Set mem device %lld memBlock %lld threads %d Device %d\n", *memDevice, *memPerBlock, threadsPerBlock, *deviceSelected);
}
#endif
}
void launchMatchReal(int instancesPerRun, int classifiersPerRun, int blockX,
int maxNumAtt, int numAttIns, int shared_mem_size, int *d_numIns,
int *finalStruct,
int strataOffset) {
// Initialize block and grid size
dim3 grid = dim3(blockX, classifiersPerRun, 1);
dim3 threads = dim3(threadsPerBlock, 1, 1);
// Calculate the match and predicted for each instance and classifier pair.
hipLaunchKernelGGL(( cudaCalculateMatchReal), dim3(grid), dim3(threads),
shared_mem_size * 3 , 0, instancesPerRun, classifiersPerRun,
maxNumAtt, numAttIns, d_predicates,
d_whichAtt, d_info, &d_realValues[strataOffset*numAttIns],
&d_realClasses[strataOffset], d_numIns, finalStruct);
cutilCheckMsg("Kernel execution failed");
hipDeviceSynchronize();
}
void launchMatchNominal(int instancesPerRun, int classifiersPerRun, int blockX,
int shared_mem_size, int atts, int ruleSize, int *d_numIns,
int strataOffset) {
// Initialize block and grid size
dim3 grid = dim3(blockX, classifiersPerRun, 1);
dim3 threads = dim3(threadsPerBlock, 1, 1);
// Calculate the match and predicted for each instance and classifier pair.
hipLaunchKernelGGL(( cudaCalculateMatchNominal), dim3(grid), dim3(threads),
shared_mem_size * 3 , 0, instancesPerRun, classifiersPerRun, ruleSize,
d_chromosome,
&d_realValues[strataOffset*atts],
&d_realClasses[strataOffset], d_numIns);
cutilCheckMsg("Kernel execution failed");
hipDeviceSynchronize();
}
void launchMatchMixed(int instancesPerRun, int classifiersPerRun, int blockX,
int maxNumAtt, int ruleSize, int numAttIns, int shared_mem_size,
int *d_numIns, int *finalStruct, int strataOffset) {
// Initialize block and grid size
dim3 grid = dim3(blockX, classifiersPerRun, 1);
dim3 threads = dim3(threadsPerBlock, 1, 1);
// Calculate the match and predicted for each instance and classifier pair.
hipLaunchKernelGGL(( cudaCalculateMatchMixed), dim3(grid), dim3(threads),
shared_mem_size * 3 , 0, instancesPerRun, classifiersPerRun,
maxNumAtt, ruleSize, numAttIns, d_predicates,
d_whichAtt, d_info, d_offsetPred, &d_realValues[strataOffset*numAttIns],
&d_realClasses[strataOffset], d_numIns, finalStruct);
cutilCheckMsg("Kernel execution failed");
hipDeviceSynchronize();
}
void launchReduction(int insPerRun, int classPerRun, int shared_mem_size,
int *d_numIns, int *finalStruct) {
int blockX =
(int) ceil((double) insPerRun / ((double) threadsPerBlock * 2));
//Iterates over the three areas created in the first kernel
for (unsigned int a = 0; a < 3; a++) {
int offset = a * insPerRun * classPerRun;
unsigned int numThreads = insPerRun;
// int numBlocks = (int) ceil(blockX / (double) N);
int numBlocks = blockX;
//Runs the reduction until the number of blocks is 0
while (numBlocks > 0) {
// setup execution parameters
dim3 grid(numBlocks, classPerRun, 1);
dim3 threads(threadsPerBlock, 1, 1);
//WATCH OUT
hipLaunchKernelGGL(( reduction6), dim3(grid), dim3(threads),
shared_mem_size , 0, &d_numIns[offset], finalStruct,
numThreads, insPerRun,a);
hipDeviceSynchronize();
cutilCheckMsg("Kernel execution failed");
numThreads = numBlocks;
numBlocks = (numBlocks == 1 ? 0 : (int) ceil((double) numThreads
/ (double) (threadsPerBlock)));
// numBlocks = (numBlocks == 1 ? 0 : (int) ceil((double) numThreads
// / (double) (threadsPerBlock * N)));
}
}
}
inline void launchKernelsReal(int instancesPerRun, int classifiersPerRun,
int maxNumAtt, int numAttIns, int classChecked, int *d_numIns, int *finalStruct,
int **counters, int strataOffset) {
unsigned int shared_mem_size = sizeof(int) * threadsPerBlock;
int blockX = (int) ceil((double) instancesPerRun
/
(double) threadsPerBlock);
// ************ first kernel **********
launchMatchReal(instancesPerRun, classifiersPerRun, blockX, maxNumAtt,
numAttIns, shared_mem_size, d_numIns, finalStruct, strataOffset);
// *********** second kernel **********
if (blockX > 1) {
launchReduction(blockX, classifiersPerRun, shared_mem_size, d_numIns, finalStruct);
}
// Copy the memory from device to host and the organize it into de counter structure
int size = sizeof(int) * classifiersPerRun * 3;
int * result = (int *) malloc(size);
cutilSafeCall(hipMemcpy(result, finalStruct, size, hipMemcpyDeviceToHost));
for (unsigned int classi = 0; classi < classifiersPerRun; classi++) {
int offset = classi * 3;
counters[classChecked + classi][0] += result[offset];
counters[classChecked + classi][1] += result[offset + 1];
counters[classChecked + classi][2] += result[offset + 2];
}
}
inline void launchKernelsNominal(int instancesPerRun, int classifiersPerRun,
int classChecked, int atts, int ruleSize, int *d_numIns, int **counters,
int strataOffset) {
unsigned int shared_mem_size = sizeof(int) * threadsPerBlock;
int blockX = (int) ceil((double) instancesPerRun
/
(double) threadsPerBlock);
// ************ first kernel **********
launchMatchNominal(instancesPerRun, classifiersPerRun, blockX,
shared_mem_size, atts, ruleSize, d_numIns, strataOffset);
// *********** second kernel **********
if (blockX > 1) {
launchReduction(blockX, classifiersPerRun, shared_mem_size, d_numIns, d_numIns);
}
int size = sizeof(int) * classifiersPerRun * 3;
int * result = (int *) malloc(size);
cutilSafeCall(hipMemcpy(result, d_numIns, size, hipMemcpyDeviceToHost));
for (unsigned int classi = 0; classi < classifiersPerRun; classi++) {
int offset = classi * 3;
counters[classChecked + classi][0] += result[offset];
counters[classChecked + classi][1] += result[offset + 1];
counters[classChecked + classi][2] += result[offset + 2];
}
	free(result); // release the host-side staging buffer
}
inline void launchKernelsMixed(int instancesPerRun, int classifiersPerRun,
int maxNumAtt, int ruleSize, int numAttIns, int classChecked,
int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
unsigned int shared_mem_size = sizeof(int) * threadsPerBlock;
int blockX = (int) ceil((double) instancesPerRun
/
(double) threadsPerBlock);
// ************ first kernel **********
launchMatchMixed(instancesPerRun, classifiersPerRun, blockX, maxNumAtt,
ruleSize, numAttIns, shared_mem_size, d_numIns, finalStruct, strataOffset);
// *********** second kernel **********
if (blockX > 1) {
launchReduction(blockX, classifiersPerRun, shared_mem_size, d_numIns, finalStruct);
}
	// Copy the memory from device to host and then organize it into the counter structure
int size = sizeof(int) * classifiersPerRun * 3;
int * result = (int *) malloc(size);
cutilSafeCall(hipMemcpy(result, finalStruct, size, hipMemcpyDeviceToHost));
for (unsigned int classi = 0; classi < classifiersPerRun; classi++) {
int offset = classi * 3;
counters[classChecked + classi][0] += result[offset];
counters[classChecked + classi][1] += result[offset + 1];
counters[classChecked + classi][2] += result[offset + 2];
}
	free(result); // release the host-side staging buffer
}
inline void allocateInstanceMemory(int realValuesSize, int realClassesSize) {
// Allocating instance memory
cutilSafeCall(hipMalloc((void **) &d_realValues, realValuesSize));
cutilSafeCall(hipMalloc((void **) &d_realClasses, realClassesSize));
}
extern "C" void freeInstanceMemory() {
// Setting free the instance memory
hipFree(d_realValues);
hipFree(d_realClasses);
}
inline void allocateClassifiersMemoryReal(int predSize, int whichSize,
int infoSize) {
// Allocating real classifiers memory
cutilSafeCall(hipMalloc((void **) &d_whichAtt, whichSize));
cutilSafeCall(hipMalloc((void **) &d_predicates, predSize));
cutilSafeCall(hipMalloc((void **) &d_info, infoSize));
}
inline void allocateClassifiersMemoryNominal(int ruleSize) {
cutilSafeCall(hipMalloc((void **) &d_chromosome, ruleSize));
}
inline void allocateClassifiersMemoryMixed(int predSize, int whichSize,
int infoSize, int offsetPredSize) {
// Allocating mixed classifiers memory
cutilSafeCall(hipMalloc((void **) &d_whichAtt, whichSize));
cutilSafeCall(hipMalloc((void **) &d_predicates, predSize));
cutilSafeCall(hipMalloc((void **) &d_info, infoSize));
cutilSafeCall(hipMalloc((void **) &d_offsetPred, offsetPredSize));
}
inline void freeClassifiersMemoryReal() {
	// Setting free the classifier memory
hipFree(d_predicates);
hipFree(d_info);
hipFree(d_whichAtt);
}
inline void freeClassifiersMemoryNominal() {
	// Setting free the classifier memory
hipFree(d_chromosome);
}
inline void freeClassifiersMemoryMixed() {
	// Setting free the classifier memory
hipFree(d_predicates);
hipFree(d_info);
hipFree(d_whichAtt);
hipFree(d_offsetPred);
}
extern "C" void copyStaticClassifierInfo(int atts, int *offsetPredicates,
int offsetPredSize) {
	// This information is static and does not change during the whole execution of the GA.
	cutilSafeCall(hipMemcpyToSymbol(c_numAtts, &atts, sizeof(int), 0,
			hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpyToSymbol(c_offsetAttribute, offsetPredicates,
offsetPredSize, 0, hipMemcpyHostToDevice));
}
inline void copyInstanceMemoryReal(int numInstances, int insChecked, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize) {
// Adjusting the instances per run parameter for the last iteration
if (*instancesPerRun > numInstances - insChecked) {
*instancesPerRun = numInstances - insChecked;
realClassesSize = sizeof(int) * (*instancesPerRun);
realValuesSize = sizeof(float) * (*instancesPerRun) * atts;
}
// Copying the instance data into the device memory
cutilSafeCall(hipMemcpy(d_realValues, &(realValues[insChecked * atts]),
realValuesSize, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(d_realClasses, &(realClasses[insChecked]),
realClassesSize, hipMemcpyHostToDevice));
}
inline void copyInstanceMemoryMixed(int numInstances, int insChecked, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize) {
// Adjusting the instances per run parameter for the last iteration
if (*instancesPerRun > numInstances - insChecked) {
*instancesPerRun = numInstances - insChecked;
realClassesSize = sizeof(int) * (*instancesPerRun);
realValuesSize = sizeof(float) * (*instancesPerRun) * atts;
}
// Copying the instance data into the device memory
cutilSafeCall(hipMemcpy(d_realValues, &(realValues[insChecked * atts]),
realValuesSize, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(d_realClasses, &(realClasses[insChecked]),
realClassesSize, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpyToSymbol(c_typeOfAttribute, typeOfAttributes,
typeOfAttSize, 0, hipMemcpyHostToDevice));
}
// This function is called by functions.cpp to allocate the instances at the beginning
extern "C" void allocateInstanceMemoryCuda(int realValuesSize, int realClassesSize) {
allocateInstanceMemory(realValuesSize, realClassesSize);
}
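// Sizing sketch (illustrative, not taken from the original host code):
// functions.cpp is expected to size these buffers from the instance window, e.g.
//   int realValuesSize  = sizeof(float) * instancesPerRun * atts;
//   int realClassesSize = sizeof(int)   * instancesPerRun;
//   allocateInstanceMemoryCuda(realValuesSize, realClassesSize);
// which matches how copyInstanceMemoryReal() recomputes both sizes for the
// last, possibly smaller, batch.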
// This function is called by functions.cpp to copy the instances at the beginning
extern "C" void copyInstancesToDeviceCudaReal(int numInstances, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize) {
copyInstanceMemoryReal(numInstances, 0, atts, instancesPerRun, realValues,
realValuesSize, realClasses, realClassesSize);
}
// This function is called by functions.cpp to copy the instances at the beginning
extern "C" void copyInstancesToDeviceCudaMixed(int numInstances, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize) {
copyInstanceMemoryMixed(numInstances, 0, atts, instancesPerRun, realValues,
realValuesSize, realClasses, realClassesSize, typeOfAttributes,
typeOfAttSize);
}
inline void copyClassifiersMemoryReal(int popSize, int classChecked,
int maxNumAtt, int *classifiersPerRun, float *predicates, int predSize,
int *whichAtt, int whichSize, ClassifierInfo * info, int infoSize) {
// Adjusting the classifiers per run for the last iterations
if (*classifiersPerRun > popSize - classChecked) {
*classifiersPerRun = popSize - classChecked;
predSize = sizeof(float) * (*classifiersPerRun) * maxNumAtt * 2;
whichSize = sizeof(int) * (*classifiersPerRun) * maxNumAtt;
}
// Copying pop info into the device memory
cutilSafeCall(hipMemcpy(d_predicates, &(predicates[classChecked
* maxNumAtt * 2]), predSize, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(d_whichAtt, &(whichAtt[classChecked * maxNumAtt]),
whichSize, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(d_info, &(info[classChecked]), infoSize,
hipMemcpyHostToDevice));
}
inline void copyClassifiersMemoryNominal(int popSize, int classChecked,
int *classifiersPerRun, int ruleSize, unsigned char *chromosome,
int chromosomeSize) {
// Adjusting the classifiers per run for the last iterations
if (*classifiersPerRun > popSize - classChecked) {
*classifiersPerRun = popSize - classChecked;
chromosomeSize = sizeof(unsigned char) * (*classifiersPerRun)
* ruleSize;
}
// Copying pop info into the device memory
cutilSafeCall(hipMemcpy(d_chromosome,
&(chromosome[classChecked * ruleSize]), chromosomeSize,
hipMemcpyHostToDevice));
}
inline void copyClassifiersMemoryMixed(int popSize, int classChecked,
int maxNumAtt, int ruleSize, int *classifiersPerRun, float *predicates,
int predSize, int *whichAtt, int whichSize, ClassifierInfo * info,
int infoSize, int * offsetPred, int offsetPredSize) {
// Adjusting the classifiers per run for the last iterations
if (*classifiersPerRun > popSize - classChecked) {
*classifiersPerRun = popSize - classChecked;
predSize = sizeof(float) * (*classifiersPerRun) * ruleSize;
whichSize = sizeof(int) * (*classifiersPerRun) * maxNumAtt;
offsetPredSize = whichSize;
}
// Copying pop info into the device memory
cutilSafeCall(hipMemcpy(d_predicates,
&(predicates[classChecked * ruleSize]), predSize,
hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(d_whichAtt, &(whichAtt[classChecked * maxNumAtt]),
whichSize, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(d_info, &(info[classChecked]), infoSize,
hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(d_offsetPred, &(offsetPred[classChecked
* maxNumAtt]), offsetPredSize, hipMemcpyHostToDevice));
}
inline void iteratingOverClassifiersReal(int popSize, int numInstances,
int maxNumAtt, int atts, int classifiersPerRun, int instancesPerRun,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
	// Iterating over the classifiers to copy the info into device memory and calculate the counters
int classifiersPerRunOrig = classifiersPerRun;
for (int classChecked = 0; classChecked < popSize; classChecked
= classChecked + classifiersPerRun) {
copyClassifiersMemoryReal(popSize, classChecked, maxNumAtt,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize);
int instancesPerRunOrig = instancesPerRun;
// Iterate over the instances to copy into device memory and calculate the counters
for (int insChecked = 0; insChecked < numInstances; insChecked
= insChecked + instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
launchKernelsReal(instancesPerRun, classifiersPerRun, maxNumAtt,
atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
instancesPerRun = instancesPerRunOrig;
}
classifiersPerRun = classifiersPerRunOrig;
}
inline void iteratingOverClassifiersNominal(int popSize, int numInstances,
int atts, int ruleSize, int classifiersPerRun, int instancesPerRun,
unsigned char * chromosome, int chromosomeSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int **counters, int strataOffset) {
	// Iterating over the classifiers to copy the info into device memory and calculate the counters
int classifiersPerRunOrig = classifiersPerRun;
for (int classChecked = 0; classChecked < popSize; classChecked
= classChecked + classifiersPerRun) {
copyClassifiersMemoryNominal(popSize, classChecked, &classifiersPerRun,
ruleSize, chromosome, chromosomeSize);
int instancesPerRunOrig = instancesPerRun;
// Iterate over the instances to copy into device memory and calculate the counters
for (int insChecked = 0; insChecked < numInstances; insChecked
= insChecked + instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
launchKernelsNominal(instancesPerRun, classifiersPerRun,
classChecked, atts, ruleSize, d_numIns, counters, strataOffset);
}
instancesPerRun = instancesPerRunOrig;
}
classifiersPerRun = classifiersPerRunOrig;
}
inline void iteratingOverClassifiersMixed(int popSize, int numInstances,
int maxNumAtt, int ruleSize, int atts, int classifiersPerRun,
int instancesPerRun, float *predicates, int predSize, int *whichAtt,
int whichSize, ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
	// Iterating over the classifiers to copy the info into device memory and calculate the counters
int classifiersPerRunOrig = classifiersPerRun;
for (int classChecked = 0; classChecked < popSize; classChecked
= classChecked + classifiersPerRun) {
copyClassifiersMemoryMixed(popSize, classChecked, maxNumAtt, ruleSize,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize, offsetPred, offsetPredSize);
int instancesPerRunOrig = instancesPerRun;
// Iterate over the instances to copy into device memory and calculate the counters
for (int insChecked = 0; insChecked < numInstances; insChecked
= insChecked + instancesPerRun) {
copyInstanceMemoryMixed(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize);
launchKernelsMixed(instancesPerRun, classifiersPerRun, maxNumAtt,
ruleSize, atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
instancesPerRun = instancesPerRunOrig;
}
classifiersPerRun = classifiersPerRunOrig;
}
inline void iteratingOverInstancesReal(int popSize, int numInstances,
int maxNumAtt, int atts, int classifiersPerRun, int instancesPerRun,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
// Iterate over the instances to copy into device memory and calculate the counters
int instancesPerRunOrig = instancesPerRun;
for (int insChecked = 0; insChecked < numInstances; insChecked
+= instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
int classifiersPerRunOrig = classifiersPerRun;
		// Iterating over the classifiers to copy the info into device memory and calculate the counters
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryReal(popSize, classChecked, maxNumAtt,
&classifiersPerRun, predicates, predSize, whichAtt,
whichSize, info, infoSize);
launchKernelsReal(instancesPerRun, classifiersPerRun, maxNumAtt,
atts, classChecked, d_numIns,finalStruct, counters, strataOffset);
}
classifiersPerRun = classifiersPerRunOrig;
}
instancesPerRun = instancesPerRunOrig;
}
inline void iteratingOverInstancesNominal(int popSize, int numInstances,
int atts, int ruleSize, int classifiersPerRun, int instancesPerRun,
unsigned char * chromosome, int chromosomeSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int **counters, int strataOffset) {
// Iterate over the instances to copy into device memory and calculate the counters
int instancesPerRunOrig = instancesPerRun;
for (int insChecked = 0; insChecked < numInstances; insChecked
+= instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
int classifiersPerRunOrig = classifiersPerRun;
		// Iterating over the classifiers to copy the info into device memory and calculate the counters
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryNominal(popSize, classChecked,
&classifiersPerRun, ruleSize, chromosome, chromosomeSize);
launchKernelsNominal(instancesPerRun, classifiersPerRun,
classChecked, atts, ruleSize, d_numIns, counters, strataOffset);
}
classifiersPerRun = classifiersPerRunOrig;
}
instancesPerRun = instancesPerRunOrig;
}
inline void iteratingOverInstancesMixed(int popSize, int numInstances,
int maxNumAtt, int ruleSize, int atts, int classifiersPerRun,
int instancesPerRun, float *predicates, int predSize, int *whichAtt,
int whichSize, ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
// Iterate over the instances to copy into device memory and calculate the counters
int instancesPerRunOrig = instancesPerRun;
for (int insChecked = 0; insChecked < numInstances; insChecked
+= instancesPerRun) {
copyInstanceMemoryMixed(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize);
int classifiersPerRunOrig = classifiersPerRun;
		// Iterating over the classifiers to copy the info into device memory and calculate the counters
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryMixed(popSize, classChecked, maxNumAtt,
ruleSize, &classifiersPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize);
launchKernelsMixed(instancesPerRun, classifiersPerRun, maxNumAtt,
ruleSize, atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
classifiersPerRun = classifiersPerRunOrig;
}
instancesPerRun = instancesPerRunOrig;
}
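// Note (added): the iteratingOverClassifiers* variants keep a classifier batch
// resident and stream instance batches past it, while the iteratingOverInstances*
// variants do the opposite. calculateFitnessCuda* picks the classifier-major
// form when the whole population fits in a single run (classifiersPerRun ==
// popSize), presumably to minimise repeated host-to-device copies.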
void onlyIterateClassifiersReal(int popSize, int maxNumAtt, int atts,
int classifiersPerRun, int instancesPerRun, float *predicates,
int predSize, int *whichAtt, int whichSize, ClassifierInfo * info,
int infoSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryReal(popSize, classChecked, maxNumAtt,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize);
launchKernelsReal(instancesPerRun, classifiersPerRun, maxNumAtt, atts,
classChecked, d_numIns, finalStruct, counters, strataOffset);
}
}
void onlyIterateClassifiersNominal(int popSize, int classifiersPerRun,
int instancesPerRun, int atts, int ruleSize, unsigned char *chromosome,
int chromosomeSize, int *d_numIns, int **counters, int strataOffset) {
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryNominal(popSize, classChecked, &classifiersPerRun,
ruleSize, chromosome, chromosomeSize);
launchKernelsNominal(instancesPerRun, classifiersPerRun, classChecked,
atts, ruleSize, d_numIns, counters, strataOffset);
}
}
void onlyIterateClassifiersMixed(int popSize, int maxNumAtt, int ruleSize,
int atts, int classifiersPerRun, int instancesPerRun,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryMixed(popSize, classChecked, maxNumAtt, ruleSize,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize, offsetPred, offsetPredSize);
launchKernelsMixed(instancesPerRun, classifiersPerRun, maxNumAtt,
ruleSize, atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
}
extern "C" int **calculateFitnessCudaReal(int alreadyAllocatedInstances,
int maxNumAtt, int atts, int numInstances, int popSize,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int instancesPerRun, int classifiersPerRun, int strataOffset) {
	// Initializing the counters for each classifier. These counters will be updated
	// after each run, because it is possible that we won't be able to check all the
	// classifiers at the same time.
int **counters = (int **) malloc(sizeof(int *) * popSize);
for (int i = 0; i < popSize; i++) {
counters[i] = (int *) malloc(sizeof(int) * 3);
counters[i][0] = 0;
counters[i][1] = 0;
counters[i][2] = 0;
}
// Reserving device memory for instances
if (!alreadyAllocatedInstances)
allocateInstanceMemory(realValuesSize, realClassesSize);
//Reserving device memory for classifiers
allocateClassifiersMemoryReal(predSize, whichSize, infoSize);
// Initialize the device output memory
int *d_numIns;
int numInsSize = sizeof(int) * 3 * classifiersPerRun * (int) ceil(
(double) instancesPerRun / (double) threadsPerBlock);
cutilSafeCall(hipMalloc((void **) &d_numIns, numInsSize));
int *finalStruct;
cutilSafeCall(hipMalloc((void **) &finalStruct, sizeof(int) * 3 * classifiersPerRun));
if (alreadyAllocatedInstances) {
onlyIterateClassifiersReal(popSize, maxNumAtt, atts, classifiersPerRun,
instancesPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize, d_numIns, finalStruct, counters, strataOffset);
} else if (classifiersPerRun == popSize) {
iteratingOverClassifiersReal(popSize, numInstances, maxNumAtt, atts,
classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, realValues,
realValuesSize, realClasses, realClassesSize, d_numIns, finalStruct,
counters,strataOffset);
} else {
iteratingOverInstancesReal(popSize, numInstances, maxNumAtt, atts,
classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, realValues,
realValuesSize, realClasses, realClassesSize, d_numIns, finalStruct,
counters,strataOffset);
}
if (!alreadyAllocatedInstances)
freeInstanceMemory();
freeClassifiersMemoryReal();
hipFree(d_numIns);
hipFree(finalStruct);
return counters;
}
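/* Illustrative call sketch (hypothetical variable names, not part of the
 * original host code): calculateFitnessCudaReal() returns one triplet per
 * classifier -- matched instances, instances of the classifier's predicted
 * class, and instances satisfying both -- which the caller owns and must free.
 */
#if 0
int **counters = calculateFitnessCudaReal(0 /* alreadyAllocatedInstances */,
		maxNumAtt, atts, numInstances, popSize,
		predicates, predSize, whichAtt, whichSize, info, infoSize,
		realValues, realValuesSize, realClasses, realClassesSize,
		instancesPerRun, classifiersPerRun, 0 /* strataOffset */);
for (int i = 0; i < popSize; i++) {
	// accuracy-style statistics are derived from the three counters
	free(counters[i]);
}
free(counters);
#endif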
extern "C" int **calculateFitnessCudaNominal(int alreadyAllocatedInstances,
int numInstances, int popSize, int atts, int ruleSize,
unsigned char *chromosome, int chromosomeSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int instancesPerRun, int classifiersPerRun, int strataOffset) {
	// Initializing the counters for each classifier. These counters will be updated
	// after each run, because it is possible that we won't be able to check all the
	// classifiers at the same time.
int **counters = (int **) malloc(sizeof(int *) * popSize);
for (int i = 0; i < popSize; i++) {
counters[i] = (int *) malloc(sizeof(int) * 3);
counters[i][0] = 0;
counters[i][1] = 0;
counters[i][2] = 0;
}
// Reserving device memory for instances
if (!alreadyAllocatedInstances)
allocateInstanceMemory(realValuesSize, realClassesSize);
//Reserving device memory for classifiers
allocateClassifiersMemoryNominal(ruleSize);
// Initialize the device output memory
int *d_numIns;
int numInsSize = sizeof(int) * 3 * classifiersPerRun * (int) ceil(
(double) instancesPerRun / (double) threadsPerBlock);
cutilSafeCall(hipMalloc((void **) &d_numIns, numInsSize));
if (alreadyAllocatedInstances) {
onlyIterateClassifiersNominal(popSize, classifiersPerRun,
instancesPerRun, atts, ruleSize, chromosome, chromosomeSize,
d_numIns, counters, strataOffset);
} else if (classifiersPerRun == popSize) {
iteratingOverClassifiersNominal(popSize, numInstances, atts, ruleSize,
classifiersPerRun, instancesPerRun, chromosome, chromosomeSize,
realValues, realValuesSize, realClasses, realClassesSize,
d_numIns, counters,strataOffset);
} else {
iteratingOverInstancesNominal(popSize, numInstances, atts, ruleSize,
classifiersPerRun, instancesPerRun, chromosome, chromosomeSize,
realValues, realValuesSize, realClasses, realClassesSize,
d_numIns, counters,strataOffset);
}
if (!alreadyAllocatedInstances)
freeInstanceMemory();
freeClassifiersMemoryNominal();
hipFree(d_numIns);
return counters;
}
extern "C" int **calculateFitnessCudaMixed(int alreadyAllocatedInstances,
int maxNumAtt, int ruleSize, int atts, int numInstances, int popSize,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize, int instancesPerRun, int classifiersPerRun,
int strataOffset) {
	// Initializing the counters for each classifier. These counters will be updated
	// after each run, because it is possible that we won't be able to check all the
	// classifiers at the same time.
int **counters = (int **) malloc(sizeof(int *) * popSize);
for (int i = 0; i < popSize; i++) {
counters[i] = (int *) malloc(sizeof(int) * 3);
counters[i][0] = 0;
counters[i][1] = 0;
counters[i][2] = 0;
}
// Reserving device memory for instances
if (!alreadyAllocatedInstances) {
allocateInstanceMemory(realValuesSize, realClassesSize);
}
//Reserving device memory for classifiers
allocateClassifiersMemoryMixed(predSize, whichSize, infoSize,
offsetPredSize);
// Initialize the device output memory
int *d_numIns;
int numInsSize = sizeof(int) * 3 * classifiersPerRun * (int) ceil(
(double) instancesPerRun / (double) threadsPerBlock);
cutilSafeCall(hipMalloc((void **) &d_numIns, numInsSize));
int *finalStruct;
cutilSafeCall(hipMalloc((void **) &finalStruct, sizeof(int) * 3 * classifiersPerRun));
if (alreadyAllocatedInstances) {
onlyIterateClassifiersMixed(popSize, maxNumAtt, ruleSize, atts,
classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize, d_numIns, finalStruct, counters, strataOffset);
} else if (classifiersPerRun == popSize) {
iteratingOverClassifiersMixed(popSize, numInstances, maxNumAtt,
ruleSize, atts, classifiersPerRun, instancesPerRun, predicates,
predSize, whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize, d_numIns, finalStruct,
counters, strataOffset);
} else {
iteratingOverInstancesMixed(popSize, numInstances, maxNumAtt, ruleSize,
atts, classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize, d_numIns, finalStruct,
counters, strataOffset);
}
if (!alreadyAllocatedInstances)
freeInstanceMemory();
freeClassifiersMemoryMixed();
hipFree(d_numIns);
hipFree(finalStruct);
return counters;
}
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchReal(int insPerRun,
int classPerRun,
int maxNumAtt, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct)
{
// Calculating the classifier and instance indexes inside the device structures
int insIndex = blockIdx.x * blockDim.x + threadIdx.x;
int classIndex = blockIdx.y * blockDim.y + threadIdx.y;
int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int tidDim = tid * 3;
	// If this instance and classifier index pair is within bounds
if (insIndex < insPerRun && classIndex < classPerRun) {
// Calculate match for the classifier and instance pair
int attIndex = classIndex * maxNumAtt;
int end=attIndex+info[classIndex].numAtt;
int predOffset = attIndex * 2;
int base = insIndex * numAttIns;
int res = 1;
for (; res && attIndex<end; attIndex++,predOffset+=2) {
float value = realValues[base + whichAtt[attIndex]];
if (value < predicates[predOffset]) res = 0;
if (value > predicates[predOffset + 1]) res = 0;
}
int action = (realClasses[insIndex] == info[classIndex].predictedClass);
sdata[tidDim] = res;
sdata[tidDim + 1] = action;
sdata[tidDim + 2] = action && res;
} else {
sdata[tidDim] = 0;
sdata[tidDim + 1] = 0;
sdata[tidDim + 2] = 0;
}
__syncthreads();
// do reduction in shared mem
if (blockDim.x == 1024 && tid < 512) {
sdata[tidDim] += sdata[tidDim + 1536];
sdata[tidDim + 1] += sdata[tidDim + 1537];
sdata[tidDim + 2] += sdata[tidDim + 1538];
}
__syncthreads();
if (tid < 256) {
sdata[tidDim] += sdata[tidDim + 768];
sdata[tidDim + 1] += sdata[tidDim + 769];
sdata[tidDim + 2] += sdata[tidDim + 770];
}
__syncthreads();
if (tid < 128) {
sdata[tidDim] += sdata[tidDim + 384];
sdata[tidDim + 1] += sdata[tidDim + 385];
sdata[tidDim + 2] += sdata[tidDim + 386];
}
__syncthreads();
if (tid < 64) {
sdata[tidDim] += sdata[tidDim + 192];
sdata[tidDim + 1] += sdata[tidDim + 193];
sdata[tidDim + 2] += sdata[tidDim + 194];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
volatile int *sd=sdata;
sd[tidDim] += sd[tidDim + 96];
sd[tidDim + 1] += sd[tidDim + 97];
sd[tidDim + 2] += sd[tidDim + 98];
EMUSYNC;
sd[tidDim] += sd[tidDim + 48];
sd[tidDim + 1] += sd[tidDim + 49];
sd[tidDim + 2] += sd[tidDim + 50];
EMUSYNC;
sd[tidDim] += sd[tidDim + 24];
sd[tidDim + 1] += sd[tidDim + 25];
sd[tidDim + 2] += sd[tidDim + 26];
EMUSYNC;
sd[tidDim] += sd[tidDim + 12];
sd[tidDim + 1] += sd[tidDim + 13];
sd[tidDim + 2] += sd[tidDim + 14];
EMUSYNC;
sd[tidDim] += sd[tidDim + 6];
sd[tidDim + 1] += sd[tidDim + 7];
sd[tidDim + 2] += sd[tidDim + 8];
EMUSYNC;
sd[tidDim] += sd[tidDim + 3];
sd[tidDim + 1] += sd[tidDim + 4];
sd[tidDim + 2] += sd[tidDim + 5];
EMUSYNC;
}
if (tid == 0) {
if (gridDim.x == 1) {
int offset = classIndex*3;
finalStruct[offset] = sdata[0];
finalStruct[offset + 1] = sdata[1];
finalStruct[offset + 2] = sdata[2];
} else {
int numInsIndex = classIndex * gridDim.x + blockIdx.x;
int numInsOffset = gridDim.x * classPerRun;
numIns[numInsIndex] = sdata[0];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[1];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[2];
}
}
}
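// Mapping summary (added note): each block row (blockIdx.y) handles one
// classifier and each thread one instance; sdata keeps three interleaved
// counters per thread (tid*3 .. tid*3+2), which is why every reduction step
// above moves in multiples of 3. With a single block per classifier the block
// total goes straight into finalStruct; otherwise the per-block sums are left
// in numIns for the separate reduction6 pass.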
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchNominal(int insPerRun,
int classPerRun,
int ruleSize,
unsigned char *chromosome,
float *realValues,
int *realClasses,
int *numIns)
{
// Calculating the classifier and instance indexes inside the device structures
int insIndex = blockIdx.x * blockDim.x + threadIdx.x;
int classIndex = blockIdx.y * blockDim.y + threadIdx.y;
int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int tidDim = tid * 3;
	// If this instance and classifier index pair is within bounds
if (insIndex < insPerRun && classIndex < classPerRun) {
// Calculate match for the classifier and instance pair
int j;
int res = 1;
for (j = 0; res && j < c_numAtts[0]; j++) {
if (chromosome[classIndex * ruleSize + c_offsetAttribute[j]
+ (unsigned char)realValues[insIndex * c_numAtts[0] + j]] == 0) {
res = 0;
}
}
int action =
(realClasses[insIndex] ==
chromosome[classIndex*ruleSize + ruleSize - 1]);
sdata[tidDim] = res;
sdata[tidDim + 1] = action;
sdata[tidDim + 2] = action && res;
} else {
sdata[tidDim] = 0;
sdata[tidDim + 1] = 0;
sdata[tidDim + 2] = 0;
}
__syncthreads();
// do reduction in shared mem
if (blockDim.x == 1024 && tid < 512) {
sdata[tidDim] += sdata[tidDim + 1536];
sdata[tidDim + 1] += sdata[tidDim + 1537];
sdata[tidDim + 2] += sdata[tidDim + 1538];
}
__syncthreads();
if (tid < 256) {
sdata[tidDim] += sdata[tidDim + 768];
sdata[tidDim + 1] += sdata[tidDim + 769];
sdata[tidDim + 2] += sdata[tidDim + 770];
}
__syncthreads();
if (tid < 128) {
sdata[tidDim] += sdata[tidDim + 384];
sdata[tidDim + 1] += sdata[tidDim + 385];
sdata[tidDim + 2] += sdata[tidDim + 386];
}
__syncthreads();
if (tid < 64) {
sdata[tidDim] += sdata[tidDim + 192];
sdata[tidDim + 1] += sdata[tidDim + 193];
sdata[tidDim + 2] += sdata[tidDim + 194];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
	{
		volatile int *sd=sdata;
		sd[tidDim] += sd[tidDim + 96];
		sd[tidDim + 1] += sd[tidDim + 97];
		sd[tidDim + 2] += sd[tidDim + 98];
		EMUSYNC;
		sd[tidDim] += sd[tidDim + 48];
		sd[tidDim + 1] += sd[tidDim + 49];
		sd[tidDim + 2] += sd[tidDim + 50];
		EMUSYNC;
		sd[tidDim] += sd[tidDim + 24];
		sd[tidDim + 1] += sd[tidDim + 25];
		sd[tidDim + 2] += sd[tidDim + 26];
		EMUSYNC;
		sd[tidDim] += sd[tidDim + 12];
		sd[tidDim + 1] += sd[tidDim + 13];
		sd[tidDim + 2] += sd[tidDim + 14];
		EMUSYNC;
		sd[tidDim] += sd[tidDim + 6];
		sd[tidDim + 1] += sd[tidDim + 7];
		sd[tidDim + 2] += sd[tidDim + 8];
		EMUSYNC;
		sd[tidDim] += sd[tidDim + 3];
		sd[tidDim + 1] += sd[tidDim + 4];
		sd[tidDim + 2] += sd[tidDim + 5];
		EMUSYNC;
	}
if (tid == 0) {
if (gridDim.x == 1) {
int offset = classIndex*3;
numIns[offset] = sdata[0];
numIns[offset + 1] = sdata[1];
numIns[offset + 2] = sdata[2];
} else {
int numInsIndex = classIndex * gridDim.x + blockIdx.x;
int numInsOffset = gridDim.x * classPerRun;
numIns[numInsIndex] = sdata[0];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[1];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[2];
}
}
}
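// Encoding note (added): the nominal rule is read as one bitmap per attribute,
// chromosome[classIndex*ruleSize + c_offsetAttribute[j] + value] != 0 meaning
// that value of attribute j is accepted, and the last byte of the rule,
// chromosome[classIndex*ruleSize + ruleSize - 1], holds the predicted class.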
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchMixed(int insPerRun,
int classPerRun,
int maxNumAtt, int ruleSize, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
int * offsetPredicates,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct)
{
// Calculating the classifier and instance indexes inside the device structures
int insIndex = blockIdx.x * blockDim.x + threadIdx.x;
int classIndex = blockIdx.y * blockDim.y + threadIdx.y;
int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int tidDim = tid * 3;
	// If this instance and classifier index pair is within bounds
if (insIndex < insPerRun && classIndex < classPerRun) {
// Calculate match for the classifier and instance pair
int res = 1;
int attIndex = classIndex * maxNumAtt;
int end = attIndex+info[classIndex].numAtt;
int baseI = insIndex * numAttIns;
int baseR = classIndex * ruleSize;
for (; res && attIndex<end; attIndex++) {
int predOffset = baseR + offsetPredicates[attIndex];
int att=whichAtt[attIndex];
if(c_typeOfAttribute[att] == REAL) {
float value = realValues[baseI + att];
if (value < predicates[predOffset]) res = 0;
if (value > predicates[predOffset + 1]) res = 0;
} else {
if(predicates[predOffset+(int)realValues[baseI + att]]==0) res = 0;
}
}
int action = (realClasses[insIndex] == info[classIndex].predictedClass);
sdata[tidDim] = res;
sdata[tidDim + 1] = action;
sdata[tidDim + 2] = action && res;
} else {
sdata[tidDim] = 0;
sdata[tidDim + 1] = 0;
sdata[tidDim + 2] = 0;
}
__syncthreads();
// do reduction in shared mem
if (blockDim.x == 1024 && tid < 512) {
sdata[tidDim] += sdata[tidDim + 1536];
sdata[tidDim + 1] += sdata[tidDim + 1537];
sdata[tidDim + 2] += sdata[tidDim + 1538];
}
__syncthreads();
if (tid < 256) {
sdata[tidDim] += sdata[tidDim + 768];
sdata[tidDim + 1] += sdata[tidDim + 769];
sdata[tidDim + 2] += sdata[tidDim + 770];
}
__syncthreads();
if (tid < 128) {
sdata[tidDim] += sdata[tidDim + 384];
sdata[tidDim + 1] += sdata[tidDim + 385];
sdata[tidDim + 2] += sdata[tidDim + 386];
}
__syncthreads();
if (tid < 64) {
sdata[tidDim] += sdata[tidDim + 192];
sdata[tidDim + 1] += sdata[tidDim + 193];
sdata[tidDim + 2] += sdata[tidDim + 194];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
volatile int *sd=sdata;
sd[tidDim] += sd[tidDim + 96];
sd[tidDim + 1] += sd[tidDim + 97];
sd[tidDim + 2] += sd[tidDim + 98];
EMUSYNC;
sd[tidDim] += sd[tidDim + 48];
sd[tidDim + 1] += sd[tidDim + 49];
sd[tidDim + 2] += sd[tidDim + 50];
EMUSYNC;
sd[tidDim] += sd[tidDim + 24];
sd[tidDim + 1] += sd[tidDim + 25];
sd[tidDim + 2] += sd[tidDim + 26];
EMUSYNC;
sd[tidDim] += sd[tidDim + 12];
sd[tidDim + 1] += sd[tidDim + 13];
sd[tidDim + 2] += sd[tidDim + 14];
EMUSYNC;
sd[tidDim] += sd[tidDim + 6];
sd[tidDim + 1] += sd[tidDim + 7];
sd[tidDim + 2] += sd[tidDim + 8];
EMUSYNC;
sd[tidDim] += sd[tidDim + 3];
sd[tidDim + 1] += sd[tidDim + 4];
sd[tidDim + 2] += sd[tidDim + 5];
EMUSYNC;
}
if (tid == 0) {
if (gridDim.x == 1) {
int offset = classIndex*3;
finalStruct[offset] = sdata[0];
finalStruct[offset + 1] = sdata[1];
finalStruct[offset + 2] = sdata[2];
} else {
int numInsIndex = classIndex * gridDim.x + blockIdx.x;
int numInsOffset = gridDim.x * classPerRun;
numIns[numInsIndex] = sdata[0];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[1];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[2];
}
}
}
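// Encoding note (added): in the mixed representation the predicate block of
// each expressed attribute starts at offsetPredicates[attIndex] inside the
// rule. REAL attributes store a [lower, upper] bound pair checked against the
// instance value, while nominal attributes store a value-indexed bitmap, as
// dispatched above through c_typeOfAttribute.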
//template < unsigned int blockSize >
__global__ void reduction6(int *entrada, int * last, int totalObjects,
int arraySize, int a)
{
unsigned int blockSize = blockDim.x;
unsigned int classindex = blockIdx.y;
unsigned int insIndex = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int realindex =
classindex * arraySize + blockIdx.x * blockSize * 2 +
threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
extern __shared__ int sdata[];
sdata[tid] = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (insIndex < totalObjects) {
if (insIndex + blockSize < totalObjects) {
sdata[tid] +=
entrada[realindex] + entrada[realindex +
blockSize];
} else {
sdata[tid] += entrada[realindex];
}
insIndex += gridSize;
realindex += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (blockSize == 1024 && tid < 512) {
sdata[tid] += sdata[tid + 512];
}
__syncthreads();
if (tid < 256) {
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
if (tid < 128) {
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
if (tid < 64) {
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
volatile int *sd=sdata;
sd[tid] += sd[tid + 32];
EMUSYNC;
sd[tid] += sd[tid + 16];
EMUSYNC;
sd[tid] += sd[tid + 8];
EMUSYNC;
sd[tid] += sd[tid + 4];
EMUSYNC;
sd[tid] += sd[tid + 2];
EMUSYNC;
sd[tid] += sd[tid + 1];
EMUSYNC;
}
if(tid == 0) {
if (gridDim.x == 1) {
last[classindex*3 + a] = sdata[0];
} else {
entrada[classindex * arraySize + blockIdx.x] = sdata[0];
}
}
}
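// Reduction note (added): reduction6 uses the standard grid-stride scheme in
// which every thread first accumulates entrada[realindex] and, when available,
// entrada[realindex + blockSize], i.e. two loads per thread. launchReduction()
// therefore starts with ceil(insPerRun / (threadsPerBlock * 2)) blocks and
// keeps re-launching with fewer blocks until gridDim.x == 1, when the total of
// counter `a` for each classifier is written to last[classindex*3 + a].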
| 9657f57d3e83a3233492291954153225eb109ea9.cu |
#ifndef __CUDA_COMPILED__
#define __CUDA_COMPILED__
#endif
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cutil_inline.h>
using namespace std;
//#define THREADS_PER_BLOCK 512
#define MAX_TYPE_SIZE 512
//#define N 512
#define NOMINAL 1
#define ENTER 2
#define REAL 3
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
struct __align__ (8) ClassifierInfo {
int numAtt;
int predictedClass;
};
unsigned char *d_chromosome;
float *d_predicates;
int *d_whichAtt;
ClassifierInfo *d_info;
int *d_offsetPred;
float *d_realValues;
int *d_realClasses;
__constant__ int c_typeOfAttribute[MAX_TYPE_SIZE];
__constant__ int c_numAtts[1];
__constant__ int c_offsetAttribute[MAX_TYPE_SIZE];
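// Note (added): c_typeOfAttribute and c_offsetAttribute live in constant
// memory, so this implementation supports at most MAX_TYPE_SIZE (512)
// attributes per instance.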
int threadsPerBlock;
//template < unsigned int blockSize >
__global__ void reduction6(int *entrada, int * last, int totalObjects,
int arraySize, int a);
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchMixed(int insPerRun,
int classPerRun,
int maxNumAtt, int ruleSize, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
int * offsetPredicates,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct);
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchReal(int insPerRun,
int classPerRun,
int maxNumAtt, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct);
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchNominal(int insPerRun,
int classPerRun,
int ruleSize,
unsigned char *chromosome,
float *realValues,
int *realClasses,
int *numIns);
inline int setDeviceFirstTime() {
// Determine de number of cuda enabled devices available.
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int deviceSelected;
	// If there is a device available we iterate over the devices to select the one with the largest memory capacity
if (deviceCount > 0) {
int device;
		size_t maxCapacityFound = 0;
cudaDeviceProp prop;
for (device = 0; device < deviceCount; device++) {
cudaGetDeviceProperties(&prop, device);
if (prop.totalGlobalMem > maxCapacityFound) {
maxCapacityFound = prop.totalGlobalMem;
deviceSelected = device;
// A device with a larger capacity was found
}
}
return deviceSelected;
} else {
		fprintf(stderr, "There are no CUDA enabled devices available\n");
exit(1);
}
}
extern "C" void setDevice(size_t * memDevice, size_t * memPerBlock, int * tBlock,
int * deviceSelected, double percent) {
// Sets 256MB as the global memory in case of using device emulation.
#ifdef __DEVICE_EMULATION__
*memDevice = 268435456;
*memPerBlock = 16384;
	*tBlock = 512;
threadsPerBlock = 512;
#endif
#ifndef __DEVICE_EMULATION__
cudaDeviceProp prop;
if (*deviceSelected == -1) {
*deviceSelected = setDeviceFirstTime();
}
	if (*memDevice == 0 || (*memPerBlock == 0 && *tBlock == 0)) {
		// If the device data was not collected previously we query and cache it now
cudaSetDevice(*deviceSelected);
cudaGetDeviceProperties(&prop, *deviceSelected);
// if(*deviceSelected == 0) {
//double percent = cm.getParameter(PERC_DEVICE_MEM);
*memDevice = (size_t) floor(percent*prop.totalGlobalMem);
fprintf(stdout, "Using %f of device memory %lld\n", percent, prop.totalGlobalMem);
// } else {
// *memDevice = prop.totalGlobalMem;
// }
*memPerBlock = prop.sharedMemPerBlock;
*tBlock = prop.maxThreadsPerBlock;
threadsPerBlock = prop.maxThreadsPerBlock;
//threadsPerBlock = 512;
// *memDevice = 1024*12;
fprintf(stdout, "Set mem device %lld memBlock %lld threads %d Device %d\n", *memDevice, *memPerBlock, threadsPerBlock, *deviceSelected);
}
#endif
}
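/* Illustrative call sketch (hypothetical caller, not in the original sources):
 * deviceSelected must start at -1 so the first call picks the CUDA device with
 * the most global memory; subsequent calls reuse the cached values.
 */
#if 0
size_t memDevice = 0, memPerBlock = 0;
int tBlock = 0, deviceSelected = -1;
setDevice(&memDevice, &memPerBlock, &tBlock, &deviceSelected, 0.8);
#endif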
void launchMatchReal(int instancesPerRun, int classifiersPerRun, int blockX,
int maxNumAtt, int numAttIns, int shared_mem_size, int *d_numIns,
int *finalStruct,
int strataOffset) {
// Initialize block and grid size
dim3 grid = dim3(blockX, classifiersPerRun, 1);
dim3 threads = dim3(threadsPerBlock, 1, 1);
	// Calculate the match and prediction counters for each instance and classifier pair.
cudaCalculateMatchReal<<< grid, threads,
shared_mem_size * 3 >>> (instancesPerRun, classifiersPerRun,
maxNumAtt, numAttIns, d_predicates,
d_whichAtt, d_info, &d_realValues[strataOffset*numAttIns],
&d_realClasses[strataOffset], d_numIns, finalStruct);
cutilCheckMsg("Kernel execution failed");
cudaThreadSynchronize();
}
void launchMatchNominal(int instancesPerRun, int classifiersPerRun, int blockX,
int shared_mem_size, int atts, int ruleSize, int *d_numIns,
int strataOffset) {
// Initialize block and grid size
dim3 grid = dim3(blockX, classifiersPerRun, 1);
dim3 threads = dim3(threadsPerBlock, 1, 1);
	// Calculate the match and prediction counters for each instance and classifier pair.
cudaCalculateMatchNominal<<< grid, threads,
shared_mem_size * 3 >>> (instancesPerRun, classifiersPerRun, ruleSize,
d_chromosome,
&d_realValues[strataOffset*atts],
&d_realClasses[strataOffset], d_numIns);
cutilCheckMsg("Kernel execution failed");
cudaThreadSynchronize();
}
void launchMatchMixed(int instancesPerRun, int classifiersPerRun, int blockX,
int maxNumAtt, int ruleSize, int numAttIns, int shared_mem_size,
int *d_numIns, int *finalStruct, int strataOffset) {
// Initialize block and grid size
dim3 grid = dim3(blockX, classifiersPerRun, 1);
dim3 threads = dim3(threadsPerBlock, 1, 1);
	// Calculate the match and prediction counters for each instance and classifier pair.
cudaCalculateMatchMixed<<< grid, threads,
shared_mem_size * 3 >>> (instancesPerRun, classifiersPerRun,
maxNumAtt, ruleSize, numAttIns, d_predicates,
d_whichAtt, d_info, d_offsetPred, &d_realValues[strataOffset*numAttIns],
&d_realClasses[strataOffset], d_numIns, finalStruct);
cutilCheckMsg("Kernel execution failed");
cudaThreadSynchronize();
}
void launchReduction(int insPerRun, int classPerRun, int shared_mem_size,
int *d_numIns, int *finalStruct) {
int blockX =
(int) ceil((double) insPerRun / ((double) threadsPerBlock * 2));
//Iterates over the three areas created in the first kernel
for (unsigned int a = 0; a < 3; a++) {
int offset = a * insPerRun * classPerRun;
unsigned int numThreads = insPerRun;
// int numBlocks = (int) ceil(blockX / (double) N);
int numBlocks = blockX;
//Runs the reduction until the number of blocks is 0
while (numBlocks > 0) {
// setup execution parameters
dim3 grid(numBlocks, classPerRun, 1);
dim3 threads(threadsPerBlock, 1, 1);
			//Watch out
reduction6<<< grid, threads,
shared_mem_size >>> (&d_numIns[offset], finalStruct,
numThreads, insPerRun,a);
cudaThreadSynchronize();
cutilCheckMsg("Kernel execution failed");
numThreads = numBlocks;
numBlocks = (numBlocks == 1 ? 0 : (int) ceil((double) numThreads
/ (double) (threadsPerBlock)));
// numBlocks = (numBlocks == 1 ? 0 : (int) ceil((double) numThreads
// / (double) (threadsPerBlock * N)));
}
}
}
inline void launchKernelsReal(int instancesPerRun, int classifiersPerRun,
int maxNumAtt, int numAttIns, int classChecked, int *d_numIns, int *finalStruct,
int **counters, int strataOffset) {
unsigned int shared_mem_size = sizeof(int) * threadsPerBlock;
int blockX = (int) ceil((double) instancesPerRun
/
(double) threadsPerBlock);
// ************ first kernel **********
launchMatchReal(instancesPerRun, classifiersPerRun, blockX, maxNumAtt,
numAttIns, shared_mem_size, d_numIns, finalStruct, strataOffset);
// *********** second kernel **********
if (blockX > 1) {
launchReduction(blockX, classifiersPerRun, shared_mem_size, d_numIns, finalStruct);
}
	// Copy the memory from device to host and then organize it into the counter structure
int size = sizeof(int) * classifiersPerRun * 3;
int * result = (int *) malloc(size);
cutilSafeCall(cudaMemcpy(result, finalStruct, size, cudaMemcpyDeviceToHost));
for (unsigned int classi = 0; classi < classifiersPerRun; classi++) {
int offset = classi * 3;
counters[classChecked + classi][0] += result[offset];
counters[classChecked + classi][1] += result[offset + 1];
counters[classChecked + classi][2] += result[offset + 2];
}
	free(result); // release the host-side staging buffer
}
inline void launchKernelsNominal(int instancesPerRun, int classifiersPerRun,
int classChecked, int atts, int ruleSize, int *d_numIns, int **counters,
int strataOffset) {
unsigned int shared_mem_size = sizeof(int) * threadsPerBlock;
int blockX = (int) ceil((double) instancesPerRun
/
(double) threadsPerBlock);
// ************ first kernel **********
launchMatchNominal(instancesPerRun, classifiersPerRun, blockX,
shared_mem_size, atts, ruleSize, d_numIns, strataOffset);
// *********** second kernel **********
if (blockX > 1) {
launchReduction(blockX, classifiersPerRun, shared_mem_size, d_numIns, d_numIns);
}
int size = sizeof(int) * classifiersPerRun * 3;
int * result = (int *) malloc(size);
cutilSafeCall(cudaMemcpy(result, d_numIns, size, cudaMemcpyDeviceToHost));
for (unsigned int classi = 0; classi < classifiersPerRun; classi++) {
int offset = classi * 3;
counters[classChecked + classi][0] += result[offset];
counters[classChecked + classi][1] += result[offset + 1];
counters[classChecked + classi][2] += result[offset + 2];
}
	free(result); // release the host-side staging buffer
}
inline void launchKernelsMixed(int instancesPerRun, int classifiersPerRun,
int maxNumAtt, int ruleSize, int numAttIns, int classChecked,
int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
unsigned int shared_mem_size = sizeof(int) * threadsPerBlock;
int blockX = (int) ceil((double) instancesPerRun
/
(double) threadsPerBlock);
// ************ first kernel **********
launchMatchMixed(instancesPerRun, classifiersPerRun, blockX, maxNumAtt,
ruleSize, numAttIns, shared_mem_size, d_numIns, finalStruct, strataOffset);
// *********** second kernel **********
if (blockX > 1) {
launchReduction(blockX, classifiersPerRun, shared_mem_size, d_numIns, finalStruct);
}
	// Copy the memory from device to host and then organize it into the counter structure
int size = sizeof(int) * classifiersPerRun * 3;
int * result = (int *) malloc(size);
cutilSafeCall(cudaMemcpy(result, finalStruct, size, cudaMemcpyDeviceToHost));
for (unsigned int classi = 0; classi < classifiersPerRun; classi++) {
int offset = classi * 3;
counters[classChecked + classi][0] += result[offset];
counters[classChecked + classi][1] += result[offset + 1];
counters[classChecked + classi][2] += result[offset + 2];
}
	free(result); // release the host-side staging buffer
}
inline void allocateInstanceMemory(int realValuesSize, int realClassesSize) {
// Allocating instance memory
cutilSafeCall(cudaMalloc((void **) &d_realValues, realValuesSize));
cutilSafeCall(cudaMalloc((void **) &d_realClasses, realClassesSize));
}
extern "C" void freeInstanceMemory() {
// Setting free the instance memory
cudaFree(d_realValues);
cudaFree(d_realClasses);
}
inline void allocateClassifiersMemoryReal(int predSize, int whichSize,
int infoSize) {
// Allocating real classifiers memory
cutilSafeCall(cudaMalloc((void **) &d_whichAtt, whichSize));
cutilSafeCall(cudaMalloc((void **) &d_predicates, predSize));
cutilSafeCall(cudaMalloc((void **) &d_info, infoSize));
}
inline void allocateClassifiersMemoryNominal(int ruleSize) {
cutilSafeCall(cudaMalloc((void **) &d_chromosome, ruleSize));
}
inline void allocateClassifiersMemoryMixed(int predSize, int whichSize,
int infoSize, int offsetPredSize) {
// Allocating mixed classifiers memory
cutilSafeCall(cudaMalloc((void **) &d_whichAtt, whichSize));
cutilSafeCall(cudaMalloc((void **) &d_predicates, predSize));
cutilSafeCall(cudaMalloc((void **) &d_info, infoSize));
cutilSafeCall(cudaMalloc((void **) &d_offsetPred, offsetPredSize));
}
inline void freeClassifiersMemoryReal() {
	// Setting free the classifier memory
cudaFree(d_predicates);
cudaFree(d_info);
cudaFree(d_whichAtt);
}
inline void freeClassifiersMemoryNominal() {
	// Setting free the classifier memory
cudaFree(d_chromosome);
}
inline void freeClassifiersMemoryMixed() {
	// Setting free the classifier memory
cudaFree(d_predicates);
cudaFree(d_info);
cudaFree(d_whichAtt);
cudaFree(d_offsetPred);
}
extern "C" void copyStaticClassifierInfo(int atts, int *offsetPredicates,
int offsetPredSize) {
	// This information is static and does not change during the whole execution of the GA.
	cutilSafeCall(cudaMemcpyToSymbol(c_numAtts, &atts, sizeof(int), 0,
			cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol(c_offsetAttribute, offsetPredicates,
offsetPredSize, 0, cudaMemcpyHostToDevice));
}
inline void copyInstanceMemoryReal(int numInstances, int insChecked, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize) {
// Adjusting the instances per run parameter for the last iteration
if (*instancesPerRun > numInstances - insChecked) {
*instancesPerRun = numInstances - insChecked;
realClassesSize = sizeof(int) * (*instancesPerRun);
realValuesSize = sizeof(float) * (*instancesPerRun) * atts;
}
// Copying the instance data into the device memory
cutilSafeCall(cudaMemcpy(d_realValues, &(realValues[insChecked * atts]),
realValuesSize, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_realClasses, &(realClasses[insChecked]),
realClassesSize, cudaMemcpyHostToDevice));
}
inline void copyInstanceMemoryMixed(int numInstances, int insChecked, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize) {
// Adjusting the instances per run parameter for the last iteration
if (*instancesPerRun > numInstances - insChecked) {
*instancesPerRun = numInstances - insChecked;
realClassesSize = sizeof(int) * (*instancesPerRun);
realValuesSize = sizeof(float) * (*instancesPerRun) * atts;
}
// Copying the instance data into the device memory
cutilSafeCall(cudaMemcpy(d_realValues, &(realValues[insChecked * atts]),
realValuesSize, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_realClasses, &(realClasses[insChecked]),
realClassesSize, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol(c_typeOfAttribute, typeOfAttributes,
typeOfAttSize, 0, cudaMemcpyHostToDevice));
}
// This function is called by functions.cpp to allocate the instances at the beginning
extern "C" void allocateInstanceMemoryCuda(int realValuesSize, int realClassesSize) {
allocateInstanceMemory(realValuesSize, realClassesSize);
}
// This function is called by functions.cpp to copy the instances at the beginning
extern "C" void copyInstancesToDeviceCudaReal(int numInstances, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize) {
copyInstanceMemoryReal(numInstances, 0, atts, instancesPerRun, realValues,
realValuesSize, realClasses, realClassesSize);
}
// This function is called by functions.cpp to copy the instances at the beginning
extern "C" void copyInstancesToDeviceCudaMixed(int numInstances, int atts,
int *instancesPerRun, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize) {
copyInstanceMemoryMixed(numInstances, 0, atts, instancesPerRun, realValues,
realValuesSize, realClasses, realClassesSize, typeOfAttributes,
typeOfAttSize);
}
inline void copyClassifiersMemoryReal(int popSize, int classChecked,
int maxNumAtt, int *classifiersPerRun, float *predicates, int predSize,
int *whichAtt, int whichSize, ClassifierInfo * info, int infoSize) {
// Adjusting the classifiers per run for the last iterations
if (*classifiersPerRun > popSize - classChecked) {
*classifiersPerRun = popSize - classChecked;
predSize = sizeof(float) * (*classifiersPerRun) * maxNumAtt * 2;
whichSize = sizeof(int) * (*classifiersPerRun) * maxNumAtt;
}
// Copying pop info into the device memory
cutilSafeCall(cudaMemcpy(d_predicates, &(predicates[classChecked
* maxNumAtt * 2]), predSize, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_whichAtt, &(whichAtt[classChecked * maxNumAtt]),
whichSize, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_info, &(info[classChecked]), infoSize,
cudaMemcpyHostToDevice));
}
inline void copyClassifiersMemoryNominal(int popSize, int classChecked,
int *classifiersPerRun, int ruleSize, unsigned char *chromosome,
int chromosomeSize) {
// Adjusting the classifiers per run for the last iterations
if (*classifiersPerRun > popSize - classChecked) {
*classifiersPerRun = popSize - classChecked;
chromosomeSize = sizeof(unsigned char) * (*classifiersPerRun)
* ruleSize;
}
// Copying pop info into the device memory
cutilSafeCall(cudaMemcpy(d_chromosome,
&(chromosome[classChecked * ruleSize]), chromosomeSize,
cudaMemcpyHostToDevice));
}
inline void copyClassifiersMemoryMixed(int popSize, int classChecked,
int maxNumAtt, int ruleSize, int *classifiersPerRun, float *predicates,
int predSize, int *whichAtt, int whichSize, ClassifierInfo * info,
int infoSize, int * offsetPred, int offsetPredSize) {
// Adjusting the classifiers per run for the last iterations
if (*classifiersPerRun > popSize - classChecked) {
*classifiersPerRun = popSize - classChecked;
predSize = sizeof(float) * (*classifiersPerRun) * ruleSize;
whichSize = sizeof(int) * (*classifiersPerRun) * maxNumAtt;
offsetPredSize = whichSize;
}
// Copying pop info into the device memory
cutilSafeCall(cudaMemcpy(d_predicates,
&(predicates[classChecked * ruleSize]), predSize,
cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_whichAtt, &(whichAtt[classChecked * maxNumAtt]),
whichSize, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_info, &(info[classChecked]), infoSize,
cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_offsetPred, &(offsetPred[classChecked
* maxNumAtt]), offsetPredSize, cudaMemcpyHostToDevice));
}
inline void iteratingOverClassifiersReal(int popSize, int numInstances,
int maxNumAtt, int atts, int classifiersPerRun, int instancesPerRun,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
	// Iterating over the classifiers to copy the info into device memory and calculate the counters
int classifiersPerRunOrig = classifiersPerRun;
for (int classChecked = 0; classChecked < popSize; classChecked
= classChecked + classifiersPerRun) {
copyClassifiersMemoryReal(popSize, classChecked, maxNumAtt,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize);
int instancesPerRunOrig = instancesPerRun;
// Iterate over the instances to copy into device memory and calculate the counters
for (int insChecked = 0; insChecked < numInstances; insChecked
= insChecked + instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
launchKernelsReal(instancesPerRun, classifiersPerRun, maxNumAtt,
atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
instancesPerRun = instancesPerRunOrig;
}
classifiersPerRun = classifiersPerRunOrig;
}
inline void iteratingOverClassifiersNominal(int popSize, int numInstances,
int atts, int ruleSize, int classifiersPerRun, int instancesPerRun,
unsigned char * chromosome, int chromosomeSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int **counters, int strataOffset) {
	// Iterating over the classifiers to copy the info into device memory and calculate the counters
int classifiersPerRunOrig = classifiersPerRun;
for (int classChecked = 0; classChecked < popSize; classChecked
= classChecked + classifiersPerRun) {
copyClassifiersMemoryNominal(popSize, classChecked, &classifiersPerRun,
ruleSize, chromosome, chromosomeSize);
int instancesPerRunOrig = instancesPerRun;
// Iterate over the instances to copy into device memory and calculate the counters
for (int insChecked = 0; insChecked < numInstances; insChecked
= insChecked + instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
launchKernelsNominal(instancesPerRun, classifiersPerRun,
classChecked, atts, ruleSize, d_numIns, counters, strataOffset);
}
instancesPerRun = instancesPerRunOrig;
}
classifiersPerRun = classifiersPerRunOrig;
}
inline void iteratingOverClassifiersMixed(int popSize, int numInstances,
int maxNumAtt, int ruleSize, int atts, int classifiersPerRun,
int instancesPerRun, float *predicates, int predSize, int *whichAtt,
int whichSize, ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
// Iterating over the classifiers to copy the info into device memory and calculate the counters
int classifiersPerRunOrig = classifiersPerRun;
for (int classChecked = 0; classChecked < popSize; classChecked
= classChecked + classifiersPerRun) {
copyClassifiersMemoryMixed(popSize, classChecked, maxNumAtt, ruleSize,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize, offsetPred, offsetPredSize);
int instancesPerRunOrig = instancesPerRun;
// Iterate over the instances to copy into device memory and calculate the counters
for (int insChecked = 0; insChecked < numInstances; insChecked
= insChecked + instancesPerRun) {
copyInstanceMemoryMixed(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize);
launchKernelsMixed(instancesPerRun, classifiersPerRun, maxNumAtt,
ruleSize, atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
instancesPerRun = instancesPerRunOrig;
}
classifiersPerRun = classifiersPerRunOrig;
}
inline void iteratingOverInstancesReal(int popSize, int numInstances,
int maxNumAtt, int atts, int classifiersPerRun, int instancesPerRun,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
// Iterate over the instances to copy into device memory and calculate the counters
int instancesPerRunOrig = instancesPerRun;
for (int insChecked = 0; insChecked < numInstances; insChecked
+= instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
int classifiersPerRunOrig = classifiersPerRun;
// Iterating over the classifiers to copy the info into device memory and calculate the counters
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryReal(popSize, classChecked, maxNumAtt,
&classifiersPerRun, predicates, predSize, whichAtt,
whichSize, info, infoSize);
launchKernelsReal(instancesPerRun, classifiersPerRun, maxNumAtt,
atts, classChecked, d_numIns,finalStruct, counters, strataOffset);
}
classifiersPerRun = classifiersPerRunOrig;
}
instancesPerRun = instancesPerRunOrig;
}
inline void iteratingOverInstancesNominal(int popSize, int numInstances,
int atts, int ruleSize, int classifiersPerRun, int instancesPerRun,
unsigned char * chromosome, int chromosomeSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int *d_numIns, int **counters, int strataOffset) {
// Iterate over the instances to copy into device memory and calculate the counters
int instancesPerRunOrig = instancesPerRun;
for (int insChecked = 0; insChecked < numInstances; insChecked
+= instancesPerRun) {
copyInstanceMemoryReal(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize);
int classifiersPerRunOrig = classifiersPerRun;
// Iterating over the classifiers to copy the info into device memory and calculate the counters
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryNominal(popSize, classChecked,
&classifiersPerRun, ruleSize, chromosome, chromosomeSize);
launchKernelsNominal(instancesPerRun, classifiersPerRun,
classChecked, atts, ruleSize, d_numIns, counters, strataOffset);
}
classifiersPerRun = classifiersPerRunOrig;
}
instancesPerRun = instancesPerRunOrig;
}
inline void iteratingOverInstancesMixed(int popSize, int numInstances,
int maxNumAtt, int ruleSize, int atts, int classifiersPerRun,
int instancesPerRun, float *predicates, int predSize, int *whichAtt,
int whichSize, ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
// Iterate over the instances to copy into device memory and calculate the counters
int instancesPerRunOrig = instancesPerRun;
for (int insChecked = 0; insChecked < numInstances; insChecked
+= instancesPerRun) {
copyInstanceMemoryMixed(numInstances, insChecked, atts,
&instancesPerRun, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize);
int classifiersPerRunOrig = classifiersPerRun;
// Iterating over the classifiers to copy the info into device memory and calculate the counters
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryMixed(popSize, classChecked, maxNumAtt,
ruleSize, &classifiersPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize);
launchKernelsMixed(instancesPerRun, classifiersPerRun, maxNumAtt,
ruleSize, atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
classifiersPerRun = classifiersPerRunOrig;
}
instancesPerRun = instancesPerRunOrig;
}
void onlyIterateClassifiersReal(int popSize, int maxNumAtt, int atts,
int classifiersPerRun, int instancesPerRun, float *predicates,
int predSize, int *whichAtt, int whichSize, ClassifierInfo * info,
int infoSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryReal(popSize, classChecked, maxNumAtt,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize);
launchKernelsReal(instancesPerRun, classifiersPerRun, maxNumAtt, atts,
classChecked, d_numIns, finalStruct, counters, strataOffset);
}
}
void onlyIterateClassifiersNominal(int popSize, int classifiersPerRun,
int instancesPerRun, int atts, int ruleSize, unsigned char *chromosome,
int chromosomeSize, int *d_numIns, int **counters, int strataOffset) {
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryNominal(popSize, classChecked, &classifiersPerRun,
ruleSize, chromosome, chromosomeSize);
launchKernelsNominal(instancesPerRun, classifiersPerRun, classChecked,
atts, ruleSize, d_numIns, counters, strataOffset);
}
}
void onlyIterateClassifiersMixed(int popSize, int maxNumAtt, int ruleSize,
int atts, int classifiersPerRun, int instancesPerRun,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, int *d_numIns, int *finalStruct, int **counters, int strataOffset) {
for (int classChecked = 0; classChecked < popSize; classChecked
+= classifiersPerRun) {
copyClassifiersMemoryMixed(popSize, classChecked, maxNumAtt, ruleSize,
&classifiersPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize, offsetPred, offsetPredSize);
launchKernelsMixed(instancesPerRun, classifiersPerRun, maxNumAtt,
ruleSize, atts, classChecked, d_numIns, finalStruct, counters, strataOffset);
}
}
extern "C" int **calculateFitnessCudaReal(int alreadyAllocatedInstances,
int maxNumAtt, int atts, int numInstances, int popSize,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int instancesPerRun, int classifiersPerRun, int strataOffset) {
// Initializing the counters for each classifier. These counters are updated
// after each run, because it may not be possible to check all the
// classifiers at the same time.
int **counters = (int **) malloc(sizeof(int *) * popSize);
for (int i = 0; i < popSize; i++) {
counters[i] = (int *) malloc(sizeof(int) * 3);
counters[i][0] = 0;
counters[i][1] = 0;
counters[i][2] = 0;
}
// Reserving device memory for instances
if (!alreadyAllocatedInstances)
allocateInstanceMemory(realValuesSize, realClassesSize);
//Reserving device memory for classifiers
allocateClassifiersMemoryReal(predSize, whichSize, infoSize);
// Initialize the device output memory
int *d_numIns;
int numInsSize = sizeof(int) * 3 * classifiersPerRun * (int) ceil(
(double) instancesPerRun / (double) threadsPerBlock);
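// d_numIns holds 3 partial counters per classifier and per block of instances;
// finalStruct holds the 3 final counters per classifier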
cutilSafeCall(cudaMalloc((void **) &d_numIns, numInsSize));
int *finalStruct;
cutilSafeCall(cudaMalloc((void **) &finalStruct, sizeof(int) * 3 * classifiersPerRun));
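// Three cases: the instances are already resident on the device, the whole population
// of classifiers fits in a single run, or the population must be split (the instance
// loop then becomes the outer one)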
if (alreadyAllocatedInstances) {
onlyIterateClassifiersReal(popSize, maxNumAtt, atts, classifiersPerRun,
instancesPerRun, predicates, predSize, whichAtt, whichSize,
info, infoSize, d_numIns, finalStruct, counters, strataOffset);
} else if (classifiersPerRun == popSize) {
iteratingOverClassifiersReal(popSize, numInstances, maxNumAtt, atts,
classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, realValues,
realValuesSize, realClasses, realClassesSize, d_numIns, finalStruct,
counters,strataOffset);
} else {
iteratingOverInstancesReal(popSize, numInstances, maxNumAtt, atts,
classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, realValues,
realValuesSize, realClasses, realClassesSize, d_numIns, finalStruct,
counters,strataOffset);
}
if (!alreadyAllocatedInstances)
freeInstanceMemory();
freeClassifiersMemoryReal();
cudaFree(d_numIns);
cudaFree(finalStruct);
return counters;
}
extern "C" int **calculateFitnessCudaNominal(int alreadyAllocatedInstances,
int numInstances, int popSize, int atts, int ruleSize,
unsigned char *chromosome, int chromosomeSize, float *realValues,
int realValuesSize, int *realClasses, int realClassesSize,
int instancesPerRun, int classifiersPerRun, int strataOffset) {
// Initializing the counters for each classifier. These counters are updated
// after each run, because it may not be possible to check all the
// classifiers at the same time.
int **counters = (int **) malloc(sizeof(int *) * popSize);
for (int i = 0; i < popSize; i++) {
counters[i] = (int *) malloc(sizeof(int) * 3);
counters[i][0] = 0;
counters[i][1] = 0;
counters[i][2] = 0;
}
// Reserving device memory for instances
if (!alreadyAllocatedInstances)
allocateInstanceMemory(realValuesSize, realClassesSize);
//Reserving device memory for classifiers
allocateClassifiersMemoryNominal(ruleSize);
// Initialize the device output memory
int *d_numIns;
int numInsSize = sizeof(int) * 3 * classifiersPerRun * (int) ceil(
(double) instancesPerRun / (double) threadsPerBlock);
cutilSafeCall(cudaMalloc((void **) &d_numIns, numInsSize));
if (alreadyAllocatedInstances) {
onlyIterateClassifiersNominal(popSize, classifiersPerRun,
instancesPerRun, atts, ruleSize, chromosome, chromosomeSize,
d_numIns, counters, strataOffset);
} else if (classifiersPerRun == popSize) {
iteratingOverClassifiersNominal(popSize, numInstances, atts, ruleSize,
classifiersPerRun, instancesPerRun, chromosome, chromosomeSize,
realValues, realValuesSize, realClasses, realClassesSize,
d_numIns, counters,strataOffset);
} else {
iteratingOverInstancesNominal(popSize, numInstances, atts, ruleSize,
classifiersPerRun, instancesPerRun, chromosome, chromosomeSize,
realValues, realValuesSize, realClasses, realClassesSize,
d_numIns, counters,strataOffset);
}
if (!alreadyAllocatedInstances)
freeInstanceMemory();
freeClassifiersMemoryNominal();
cudaFree(d_numIns);
return counters;
}
extern "C" int **calculateFitnessCudaMixed(int alreadyAllocatedInstances,
int maxNumAtt, int ruleSize, int atts, int numInstances, int popSize,
float *predicates, int predSize, int *whichAtt, int whichSize,
ClassifierInfo * info, int infoSize, int * offsetPred,
int offsetPredSize, float *realValues, int realValuesSize,
int *realClasses, int realClassesSize, int * typeOfAttributes,
int typeOfAttSize, int instancesPerRun, int classifiersPerRun,
int strataOffset) {
// Initializing the counters for each classifier. These counters are updated
// after each run, because it may not be possible to check all the
// classifiers at the same time.
int **counters = (int **) malloc(sizeof(int *) * popSize);
for (int i = 0; i < popSize; i++) {
counters[i] = (int *) malloc(sizeof(int) * 3);
counters[i][0] = 0;
counters[i][1] = 0;
counters[i][2] = 0;
}
// Reserving device memory for instances
if (!alreadyAllocatedInstances) {
allocateInstanceMemory(realValuesSize, realClassesSize);
}
//Reserving device memory for classifiers
allocateClassifiersMemoryMixed(predSize, whichSize, infoSize,
offsetPredSize);
// Initialize the device output memory
int *d_numIns;
int numInsSize = sizeof(int) * 3 * classifiersPerRun * (int) ceil(
(double) instancesPerRun / (double) threadsPerBlock);
cutilSafeCall(cudaMalloc((void **) &d_numIns, numInsSize));
int *finalStruct;
cutilSafeCall(cudaMalloc((void **) &finalStruct, sizeof(int) * 3 * classifiersPerRun));
if (alreadyAllocatedInstances) {
onlyIterateClassifiersMixed(popSize, maxNumAtt, ruleSize, atts,
classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize, d_numIns, finalStruct, counters, strataOffset);
} else if (classifiersPerRun == popSize) {
iteratingOverClassifiersMixed(popSize, numInstances, maxNumAtt,
ruleSize, atts, classifiersPerRun, instancesPerRun, predicates,
predSize, whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize, d_numIns, finalStruct,
counters, strataOffset);
} else {
iteratingOverInstancesMixed(popSize, numInstances, maxNumAtt, ruleSize,
atts, classifiersPerRun, instancesPerRun, predicates, predSize,
whichAtt, whichSize, info, infoSize, offsetPred,
offsetPredSize, realValues, realValuesSize, realClasses,
realClassesSize, typeOfAttributes, typeOfAttSize, d_numIns, finalStruct,
counters, strataOffset);
}
if (!alreadyAllocatedInstances)
freeInstanceMemory();
freeClassifiersMemoryMixed();
cudaFree(d_numIns);
cudaFree(finalStruct);
return counters;
}
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchReal(int insPerRun,
int classPerRun,
int maxNumAtt, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct)
{
// Calculating the classifier and instance indexes inside the device structures
int insIndex = blockIdx.x * blockDim.x + threadIdx.x;
int classIndex = blockIdx.y * blockDim.y + threadIdx.y;
int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int tidDim = tid * 3;
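// Each thread keeps three interleaved counters in shared memory (matched, correct class,
// matched and correct), hence the stride-3 offsets used in the reduction below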
// If this instance/classifier pair is within range
if (insIndex < insPerRun && classIndex < classPerRun) {
// Calculate match for the classifier and instance pair
int attIndex = classIndex * maxNumAtt;
int end=attIndex+info[classIndex].numAtt;
int predOffset = attIndex * 2;
int base = insIndex * numAttIns;
int res = 1;
for (; res && attIndex<end; attIndex++,predOffset+=2) {
float value = realValues[base + whichAtt[attIndex]];
if (value < predicates[predOffset]) res = 0;
if (value > predicates[predOffset + 1]) res = 0;
}
int action = (realClasses[insIndex] == info[classIndex].predictedClass);
sdata[tidDim] = res;
sdata[tidDim + 1] = action;
sdata[tidDim + 2] = action && res;
} else {
sdata[tidDim] = 0;
sdata[tidDim + 1] = 0;
sdata[tidDim + 2] = 0;
}
__syncthreads();
// do reduction in shared mem
if (blockDim.x == 1024 && tid < 512) {
sdata[tidDim] += sdata[tidDim + 1536];
sdata[tidDim + 1] += sdata[tidDim + 1537];
sdata[tidDim + 2] += sdata[tidDim + 1538];
}
__syncthreads();
if (tid < 256) {
sdata[tidDim] += sdata[tidDim + 768];
sdata[tidDim + 1] += sdata[tidDim + 769];
sdata[tidDim + 2] += sdata[tidDim + 770];
}
__syncthreads();
if (tid < 128) {
sdata[tidDim] += sdata[tidDim + 384];
sdata[tidDim + 1] += sdata[tidDim + 385];
sdata[tidDim + 2] += sdata[tidDim + 386];
}
__syncthreads();
if (tid < 64) {
sdata[tidDim] += sdata[tidDim + 192];
sdata[tidDim + 1] += sdata[tidDim + 193];
sdata[tidDim + 2] += sdata[tidDim + 194];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
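// Final warp-synchronous stages: the volatile pointer keeps the compiler from caching
// shared-memory reads in registers; EMUSYNC (defined elsewhere) is assumed to expand
// to a barrier only under device emulation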
volatile int *sd=sdata;
sd[tidDim] += sd[tidDim + 96];
sd[tidDim + 1] += sd[tidDim + 97];
sd[tidDim + 2] += sd[tidDim + 98];
EMUSYNC;
sd[tidDim] += sd[tidDim + 48];
sd[tidDim + 1] += sd[tidDim + 49];
sd[tidDim + 2] += sd[tidDim + 50];
EMUSYNC;
sd[tidDim] += sd[tidDim + 24];
sd[tidDim + 1] += sd[tidDim + 25];
sd[tidDim + 2] += sd[tidDim + 26];
EMUSYNC;
sd[tidDim] += sd[tidDim + 12];
sd[tidDim + 1] += sd[tidDim + 13];
sd[tidDim + 2] += sd[tidDim + 14];
EMUSYNC;
sd[tidDim] += sd[tidDim + 6];
sd[tidDim + 1] += sd[tidDim + 7];
sd[tidDim + 2] += sd[tidDim + 8];
EMUSYNC;
sd[tidDim] += sd[tidDim + 3];
sd[tidDim + 1] += sd[tidDim + 4];
sd[tidDim + 2] += sd[tidDim + 5];
EMUSYNC;
}
if (tid == 0) {
if (gridDim.x == 1) {
int offset = classIndex*3;
finalStruct[offset] = sdata[0];
finalStruct[offset + 1] = sdata[1];
finalStruct[offset + 2] = sdata[2];
} else {
int numInsIndex = classIndex * gridDim.x + blockIdx.x;
int numInsOffset = gridDim.x * classPerRun;
numIns[numInsIndex] = sdata[0];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[1];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[2];
}
}
}
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchNominal(int insPerRun,
int classPerRun,
int ruleSize,
unsigned char *chromosome,
float *realValues,
int *realClasses,
int *numIns)
{
// Calculating the classifier and instance indexes inside the device structures
int insIndex = blockIdx.x * blockDim.x + threadIdx.x;
int classIndex = blockIdx.y * blockDim.y + threadIdx.y;
int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int tidDim = tid * 3;
// If this instance/classifier pair is within range
if (insIndex < insPerRun && classIndex < classPerRun) {
// Calculate match for the classifier and instance pair
int j;
int res = 1;
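// The rule stores one byte per possible value of each nominal attribute (located through
// c_offsetAttribute); a zero at the instance's value means the attribute does not match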
for (j = 0; res && j < c_numAtts[0]; j++) {
if (chromosome[classIndex * ruleSize + c_offsetAttribute[j]
+ (unsigned char)realValues[insIndex * c_numAtts[0] + j]] == 0) {
res = 0;
}
}
int action =
(realClasses[insIndex] ==
chromosome[classIndex*ruleSize + ruleSize - 1]);
sdata[tidDim] = res;
sdata[tidDim + 1] = action;
sdata[tidDim + 2] = action && res;
} else {
sdata[tidDim] = 0;
sdata[tidDim + 1] = 0;
sdata[tidDim + 2] = 0;
}
__syncthreads();
// do reduction in shared mem
if (blockDim.x == 1024 && tid < 512) {
sdata[tidDim] += sdata[tidDim + 1536];
sdata[tidDim + 1] += sdata[tidDim + 1537];
sdata[tidDim + 2] += sdata[tidDim + 1538];
}
__syncthreads();
if (tid < 256) {
sdata[tidDim] += sdata[tidDim + 768];
sdata[tidDim + 1] += sdata[tidDim + 769];
sdata[tidDim + 2] += sdata[tidDim + 770];
}
__syncthreads();
if (tid < 128) {
sdata[tidDim] += sdata[tidDim + 384];
sdata[tidDim + 1] += sdata[tidDim + 385];
sdata[tidDim + 2] += sdata[tidDim + 386];
}
__syncthreads();
if (tid < 64) {
sdata[tidDim] += sdata[tidDim + 192];
sdata[tidDim + 1] += sdata[tidDim + 193];
sdata[tidDim + 2] += sdata[tidDim + 194];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
// volatile pointer so warp-synchronous shared-memory reads are not cached in registers
// (matches the Real and Mixed kernels)
volatile int *sd=sdata;
sd[tidDim] += sd[tidDim + 96];
sd[tidDim + 1] += sd[tidDim + 97];
sd[tidDim + 2] += sd[tidDim + 98];
EMUSYNC;
sd[tidDim] += sd[tidDim + 48];
sd[tidDim + 1] += sd[tidDim + 49];
sd[tidDim + 2] += sd[tidDim + 50];
EMUSYNC;
sd[tidDim] += sd[tidDim + 24];
sd[tidDim + 1] += sd[tidDim + 25];
sd[tidDim + 2] += sd[tidDim + 26];
EMUSYNC;
sd[tidDim] += sd[tidDim + 12];
sd[tidDim + 1] += sd[tidDim + 13];
sd[tidDim + 2] += sd[tidDim + 14];
EMUSYNC;
sd[tidDim] += sd[tidDim + 6];
sd[tidDim + 1] += sd[tidDim + 7];
sd[tidDim + 2] += sd[tidDim + 8];
EMUSYNC;
sd[tidDim] += sd[tidDim + 3];
sd[tidDim + 1] += sd[tidDim + 4];
sd[tidDim + 2] += sd[tidDim + 5];
EMUSYNC;
}
if (tid == 0) {
if (gridDim.x == 1) {
int offset = classIndex*3;
numIns[offset] = sdata[0];
numIns[offset + 1] = sdata[1];
numIns[offset + 2] = sdata[2];
} else {
int numInsIndex = classIndex * gridDim.x + blockIdx.x;
int numInsOffset = gridDim.x * classPerRun;
numIns[numInsIndex] = sdata[0];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[1];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[2];
}
}
}
//template < unsigned int blockSize >
__global__ static void cudaCalculateMatchMixed(int insPerRun,
int classPerRun,
int maxNumAtt, int ruleSize, int numAttIns,
float *predicates,
int *whichAtt,
ClassifierInfo * info,
int * offsetPredicates,
float *realValues,
int *realClasses,
int *numIns,
int *finalStruct)
{
// Calculating the classifier and instance indexes inside the device structures
int insIndex = blockIdx.x * blockDim.x + threadIdx.x;
int classIndex = blockIdx.y * blockDim.y + threadIdx.y;
int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int tidDim = tid * 3;
// If this instance/classifier pair is within range
if (insIndex < insPerRun && classIndex < classPerRun) {
// Calculate match for the classifier and instance pair
int res = 1;
int attIndex = classIndex * maxNumAtt;
int end = attIndex+info[classIndex].numAtt;
int baseI = insIndex * numAttIns;
int baseR = classIndex * ruleSize;
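// offsetPredicates locates each attribute's predicate inside the rule: REAL attributes
// store [lower, upper] bounds, nominal attributes store one flag per value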
for (; res && attIndex<end; attIndex++) {
int predOffset = baseR + offsetPredicates[attIndex];
int att=whichAtt[attIndex];
if(c_typeOfAttribute[att] == REAL) {
float value = realValues[baseI + att];
if (value < predicates[predOffset]) res = 0;
if (value > predicates[predOffset + 1]) res = 0;
} else {
if(predicates[predOffset+(int)realValues[baseI + att]]==0) res = 0;
}
}
int action = (realClasses[insIndex] == info[classIndex].predictedClass);
sdata[tidDim] = res;
sdata[tidDim + 1] = action;
sdata[tidDim + 2] = action && res;
} else {
sdata[tidDim] = 0;
sdata[tidDim + 1] = 0;
sdata[tidDim + 2] = 0;
}
__syncthreads();
// do reduction in shared mem
if (blockDim.x == 1024 && tid < 512) {
sdata[tidDim] += sdata[tidDim + 1536];
sdata[tidDim + 1] += sdata[tidDim + 1537];
sdata[tidDim + 2] += sdata[tidDim + 1538];
}
__syncthreads();
if (tid < 256) {
sdata[tidDim] += sdata[tidDim + 768];
sdata[tidDim + 1] += sdata[tidDim + 769];
sdata[tidDim + 2] += sdata[tidDim + 770];
}
__syncthreads();
if (tid < 128) {
sdata[tidDim] += sdata[tidDim + 384];
sdata[tidDim + 1] += sdata[tidDim + 385];
sdata[tidDim + 2] += sdata[tidDim + 386];
}
__syncthreads();
if (tid < 64) {
sdata[tidDim] += sdata[tidDim + 192];
sdata[tidDim + 1] += sdata[tidDim + 193];
sdata[tidDim + 2] += sdata[tidDim + 194];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
volatile int *sd=sdata;
sd[tidDim] += sd[tidDim + 96];
sd[tidDim + 1] += sd[tidDim + 97];
sd[tidDim + 2] += sd[tidDim + 98];
EMUSYNC;
sd[tidDim] += sd[tidDim + 48];
sd[tidDim + 1] += sd[tidDim + 49];
sd[tidDim + 2] += sd[tidDim + 50];
EMUSYNC;
sd[tidDim] += sd[tidDim + 24];
sd[tidDim + 1] += sd[tidDim + 25];
sd[tidDim + 2] += sd[tidDim + 26];
EMUSYNC;
sd[tidDim] += sd[tidDim + 12];
sd[tidDim + 1] += sd[tidDim + 13];
sd[tidDim + 2] += sd[tidDim + 14];
EMUSYNC;
sd[tidDim] += sd[tidDim + 6];
sd[tidDim + 1] += sd[tidDim + 7];
sd[tidDim + 2] += sd[tidDim + 8];
EMUSYNC;
sd[tidDim] += sd[tidDim + 3];
sd[tidDim + 1] += sd[tidDim + 4];
sd[tidDim + 2] += sd[tidDim + 5];
EMUSYNC;
}
if (tid == 0) {
if (gridDim.x == 1) {
int offset = classIndex*3;
finalStruct[offset] = sdata[0];
finalStruct[offset + 1] = sdata[1];
finalStruct[offset + 2] = sdata[2];
} else {
int numInsIndex = classIndex * gridDim.x + blockIdx.x;
int numInsOffset = gridDim.x * classPerRun;
numIns[numInsIndex] = sdata[0];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[1];
numInsIndex+=numInsOffset;
numIns[numInsIndex] = sdata[2];
}
}
}
//template < unsigned int blockSize >
__global__ void reduction6(int *entrada, int * last, int totalObjects,
int arraySize, int a)
{
unsigned int blockSize = blockDim.x;
unsigned int classindex = blockIdx.y;
unsigned int insIndex = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int realindex =
classindex * arraySize + blockIdx.x * blockSize * 2 +
threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
extern __shared__ int sdata[];
sdata[tid] = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
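// Each blockIdx.y row of 'entrada' belongs to one classifier; when a single block remains
// the reduced value is written to last[classindex*3 + a], otherwise the partial sum is
// written back into 'entrada' for another pass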
while (insIndex < totalObjects) {
if (insIndex + blockSize < totalObjects) {
sdata[tid] +=
entrada[realindex] + entrada[realindex +
blockSize];
} else {
sdata[tid] += entrada[realindex];
}
insIndex += gridSize;
realindex += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (blockSize == 1024 && tid < 512) {
sdata[tid] += sdata[tid + 512];
}
__syncthreads();
if (tid < 256) {
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
if (tid < 128) {
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
if (tid < 64) {
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
volatile int *sd=sdata;
sd[tid] += sd[tid + 32];
EMUSYNC;
sd[tid] += sd[tid + 16];
EMUSYNC;
sd[tid] += sd[tid + 8];
EMUSYNC;
sd[tid] += sd[tid + 4];
EMUSYNC;
sd[tid] += sd[tid + 2];
EMUSYNC;
sd[tid] += sd[tid + 1];
EMUSYNC;
}
if(tid == 0) {
if (gridDim.x == 1) {
last[classindex*3 + a] = sdata[0];
} else {
entrada[classindex * arraySize + blockIdx.x] = sdata[0];
}
}
}
|
3bb0c626e1f71ede7868c9d0a76ee09a59c1a5bc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Hello world cuda
*
* compile: nvcc hello_cuda.cu -o hello
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
__global__
void cuda_hello(){
// thread id of current block (on x axis)
int tid = threadIdx.x;
// block id (on x axis)
int bid = blockIdx.x;
printf("Ciao belli from block %d core %d!\n", bid, tid);
}
int main() {
// Launch GPU kernel
dim3 g(2,2,1);
dim3 t(2,4,1);
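// 2x2 grid of blocks, each with 2x4 threads: 32 prints in total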
hipLaunchKernelGGL(( cuda_hello), dim3(g),dim3(t), 0, 0, );
// cuda synch barrier
hipDeviceSynchronize();
return 0;
}
| 3bb0c626e1f71ede7868c9d0a76ee09a59c1a5bc.cu | /*
* Hello world cuda
*
* compile: nvcc hello_cuda.cu -o hello
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda.h>
__global__
void cuda_hello(){
// thread id of current block (on x axis)
int tid = threadIdx.x;
// block id (on x axis)
int bid = blockIdx.x;
printf("Ciao belli from block %d core %d!\n", bid, tid);
}
int main() {
// Launch GPU kernel
dim3 g(2,2,1);
dim3 t(2,4,1);
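// 2x2 grid of blocks, each with 2x4 threads: 32 prints in total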
cuda_hello<<<g,t>>>();
// cuda synch barrier
cudaDeviceSynchronize();
return 0;
}
|
a584db819918f8c7b7cfb00e88384d406e73d016.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void x15(float* x16, float* x17, float* x18, int x19) {
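// Elementwise subtraction x18 = x16 - x17 over x19 elements, using a grid-stride loop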
int x20 = gridDim.x * blockDim.x;
int x21 = threadIdx.x + blockIdx.x * blockDim.x;
while (x21 < x19) {
int x22 = x21;
x18[x22] = x16[x22] - x17[x22];
x21 = x21 + x20;
}
} | a584db819918f8c7b7cfb00e88384d406e73d016.cu | #include "includes.h"
__global__ void x15(float* x16, float* x17, float* x18, int x19) {
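// Elementwise subtraction x18 = x16 - x17 over x19 elements, using a grid-stride loop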
int x20 = gridDim.x * blockDim.x;
int x21 = threadIdx.x + blockIdx.x * blockDim.x;
while (x21 < x19) {
int x22 = x21;
x18[x22] = x16[x22] - x17[x22];
x21 = x21 + x20;
}
} |
95a93e9826a789638e30cdad1c4d514493b52610.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
int SwishPlugin::initialize() TRT_NOEXCEPT { return 0; }
nvinfer1::Dims SwishPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T>
__device__ T math_exp(T a);
template <>
__device__ half math_exp<half>(half a) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
return hexp(a);
#endif
}
template <>
__device__ float math_exp<float>(float a) {
return expf(a);
}
template <typename T>
__global__ void swish_kernel(int num, const T *input, T *output, T beta) {
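// Swish activation: out = x / (1 + exp(-beta * x)), i.e. x * sigmoid(beta * x)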
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if __CUDA_ARCH__ >= 350
output[index] =
__ldg(input + index) /
(static_cast<T>(1.0) + math_exp<T>(-beta * __ldg(input + index)));
#else
output[index] = input[index] /
(static_cast<T>(1.0) + math_exp<T>(-beta * input[index]));
#endif
}
}
template <>
__global__ void swish_kernel<half>(int num, const half *input, half *output,
half beta) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
output[index] =
__ldg(input + index) /
(static_cast<half>(1.0) + math_exp<half>(-beta * __ldg(input + index)));
#endif
}
}
int SwishPlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace, hipStream_t stream) {
#else
void *const *outputs, void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
#endif
// Input dims are CHW.
const auto &input_dims = this->getInputDims(0);
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = reinterpret_cast<float *const *>(outputs)[0];
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
int threads = 1024;
int blocks = (num + threads - 1) / threads;
hipLaunchKernelGGL(( swish_kernel), dim3(blocks), dim3(threads), 0, stream, num, input, output, beta_);
return hipGetLastError() != hipSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SwishPluginDynamic::initialize() TRT_NOEXCEPT {
getPluginNamespace();
return 0;
}
size_t SwishPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(beta_) + SerializedSize(with_fp16_);
}
void SwishPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, beta_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SwishPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool SwishPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SwishPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Swish Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int SwishPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
int threads = 1024;
int blocks = (num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp32";
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( swish_kernel<float>), dim3(blocks), dim3(threads), 0, stream, num, input, output,
beta_);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp16";
const half *input = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( swish_kernel<half>), dim3(blocks), dim3(threads), 0, stream,
num, input, output, static_cast<half>(beta_));
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Swish TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 95a93e9826a789638e30cdad1c4d514493b52610.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
int SwishPlugin::initialize() TRT_NOEXCEPT { return 0; }
nvinfer1::Dims SwishPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T>
__device__ T math_exp(T a);
template <>
__device__ half math_exp<half>(half a) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
return hexp(a);
#endif
}
template <>
__device__ float math_exp<float>(float a) {
return expf(a);
}
template <typename T>
__global__ void swish_kernel(int num, const T *input, T *output, T beta) {
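// Swish activation: out = x / (1 + exp(-beta * x)), i.e. x * sigmoid(beta * x)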
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if __CUDA_ARCH__ >= 350
output[index] =
__ldg(input + index) /
(static_cast<T>(1.0) + math_exp<T>(-beta * __ldg(input + index)));
#else
output[index] = input[index] /
(static_cast<T>(1.0) + math_exp<T>(-beta * input[index]));
#endif
}
}
template <>
__global__ void swish_kernel<half>(int num, const half *input, half *output,
half beta) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
output[index] =
__ldg(input + index) /
(static_cast<half>(1.0) + math_exp<half>(-beta * __ldg(input + index)));
#endif
}
}
int SwishPlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace, cudaStream_t stream) {
#else
void *const *outputs, void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
#endif
// Input dims are CHW.
const auto &input_dims = this->getInputDims(0);
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = reinterpret_cast<float *const *>(outputs)[0];
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
int threads = 1024;
int blocks = (num + threads - 1) / threads;
swish_kernel<<<blocks, threads, 0, stream>>>(num, input, output, beta_);
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SwishPluginDynamic::initialize() TRT_NOEXCEPT {
getPluginNamespace();
return 0;
}
size_t SwishPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(beta_) + SerializedSize(with_fp16_);
}
void SwishPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, beta_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SwishPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool SwishPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SwishPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Swish Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int SwishPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
int threads = 1024;
int blocks = (num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp32";
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
swish_kernel<float><<<blocks, threads, 0, stream>>>(num, input, output,
beta_);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp16";
const half *input = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
swish_kernel<half><<<blocks, threads, 0, stream>>>(
num, input, output, static_cast<half>(beta_));
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Swish TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
4b6591db6eccf20f2e3dcfaed000349a85db1e6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/**
*
* PARAMETERS
*
*/
#define VERBOSE
//#define DRY_RUN
/*#define USE_CPU /*
#define PROFILE_CPU // */
//#define USE_GPU /*
#define PROFILE_GPU // */
#define CPU_OUTPUT_FILE "julia_cpu.ppm"
#define GPU_OUTPUT_FILE "julia_gpu.ppm"
#define JULIA_X -0.8
#define JULIA_Y 0.156
#define SCALE 1.5
#define DIM 1000
/*#define PALE /*
#define WHITE // */
//#define GRID_SIZE 1024 /*
#define GRID_SIZE_2D DIM,DIM // */
//#define BLOCK_SIZE 128 /*
#define BLOCK_SIZE_2D 1,1 // */
/**
*
* CUDA UTILS
*
*/
#define cuda_try( ans ) { __cuda_try((ans), __FILE__, __LINE__); }
inline void __cuda_try( hipError_t code, const char * file, int line, bool abort=true ) {
if (code != hipSuccess) {
fprintf(stderr, "GPU assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
*
* UTILS
*
*/
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE_2D
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE_2D
#endif
#define STR_EXPAND(...) #__VA_ARGS__
#define ARG(...) STR_EXPAND(__VA_ARGS__)
struct cppComplex {
float r;
float i;
__host__ __device__ cppComplex( float a, float b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void ) {
return r * r + i * i;
}
__host__ __device__ cppComplex operator *( const cppComplex& a ) {
return cppComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__host__ __device__ cppComplex operator +( const cppComplex& a ) {
return cppComplex(r + a.r, i + a.i);
}
};
int julia_cpu( int x, int y ) {
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
cppComplex c(JULIA_X, JULIA_Y);
cppComplex a(jx, jy);
int i = 0;
for(; i < 200; i ++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
void cpu_draw( unsigned char * pixels ) {
#ifdef VERBOSE
printf("cpu drawing...\n");
#endif
#ifdef PROFILE_CPU
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
for (int x = 0; x < DIM; ++x) {
for (int y = 0; y < DIM; ++ y) {
pixels[x + y * DIM] = 255 * julia_cpu(x, y);
}
}
#ifdef PROFILE_CPU
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
#ifdef VERBOSE
printf("cpu drawing complete\n");
#endif
}
__global__ void kernel( unsigned char * ptr, int thread_size ) {
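// Each thread renders a run of thread_size consecutive pixels starting at 'offset',
// advancing x (and wrapping to the next row) as it goes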
int t_id =
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
blockIdx.x
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
(blockIdx.x + blockIdx.y * gridDim.x)
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
* blockDim.x
+ threadIdx.x;
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
* (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x + threadIdx.x);
#endif
int offset = thread_size * t_id;
int i = 0;
cppComplex c(JULIA_X, JULIA_Y);
int x = (i + offset) % DIM;
int y = (i + offset) / DIM;
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
for(; i < thread_size && offset + i < DIM * DIM; i ++) {
cppComplex a(jx, jy);
int j = 0;
for(; j < 200; j ++){
a = a * a + c;
if (a.magnitude2() > 1000)
break;
}
if (j < 200)
ptr[offset + i] = 0;
else
ptr[offset + i] = 255;
x ++;
if (x == DIM) {
x = 0;
y ++;
jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
}
jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
}
}
void gpu_draw( unsigned char * gpu_pixels ) {
int n = DIM * DIM;
dim3 grid_dim(GRID_DIM);
dim3 block_dim(BLOCK_DIM);
int grid_size = grid_dim.x * grid_dim.y * grid_dim.z;
int block_size = block_dim.x * block_dim.y * block_dim.z;
int thread_size = (n + (grid_size * block_size - 1)) / (grid_size * block_size);
#ifdef VERBOSE
printf("gpu drawing...\n");
printf("problem size %d, grid dim "ARG(GRID_DIM)"=%d, block size "ARG(BLOCK_DIM)"=%d, thread size %d\n", n, grid_size, block_size, thread_size);
#endif
unsigned char * dev_bitmap;
cuda_try(hipMalloc((void **)&dev_bitmap, n * sizeof(unsigned char)));
#ifdef PROFILE_GPU
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
hipLaunchKernelGGL(( kernel), dim3(grid_dim),dim3(block_dim), 0, 0, dev_bitmap, thread_size);
#ifdef PROFILE_GPU
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
cuda_try(hipPeekAtLastError());
cuda_try(hipMemcpy(gpu_pixels, dev_bitmap, n * sizeof(unsigned char), hipMemcpyDeviceToHost));
cuda_try(hipFree(dev_bitmap));
#ifdef VERBOSE
printf("gpu drawing complete\n");
#endif
}
void draw_file( char * path, unsigned char * pixels ) {
FILE * f = fopen(path, "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y ++) {
for (int x = 0; x < DIM; x ++) {
#if !defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)], f);
fputc(0, f);
fputc(0, f);
#elif defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
#elif defined(WHITE) && !defined(PALE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
#else
#warning Make up your mind on the color!
#error You must choose either PALE, WHITE, or neither!
#endif
}
}
fclose(f);
}
int main( void ) {
#ifdef VERBOSE
printf("julia set of "ARG(JULIA_X)","ARG(JULIA_Y)" resolution "ARG(DIM)"*"ARG(DIM)" scale "ARG(SCALE)"\n");
#endif
#if defined(USE_CPU) || defined(PROFILE_CPU)
unsigned char * pixels = new unsigned char[DIM * DIM];
cpu_draw(pixels);
#if !defined(DRY_RUN)
draw_file(CPU_OUTPUT_FILE, pixels);
#endif
delete [] pixels;
#endif
#if defined(USE_GPU) || defined(PROFILE_GPU)
unsigned char *gpu_pixels = new unsigned char[DIM * DIM];
gpu_draw(gpu_pixels);
#if !defined(DRY_RUN)
draw_file(GPU_OUTPUT_FILE, gpu_pixels);
#endif
delete [] gpu_pixels;
#endif
}
| 4b6591db6eccf20f2e3dcfaed000349a85db1e6d.cu | #include <stdio.h>
/**
*
* PARAMETERS
*
*/
#define VERBOSE
//#define DRY_RUN
/*#define USE_CPU /*
#define PROFILE_CPU // */
//#define USE_GPU /*
#define PROFILE_GPU // */
#define CPU_OUTPUT_FILE "julia_cpu.ppm"
#define GPU_OUTPUT_FILE "julia_gpu.ppm"
#define JULIA_X -0.8
#define JULIA_Y 0.156
#define SCALE 1.5
#define DIM 1000
/*#define PALE /*
#define WHITE // */
//#define GRID_SIZE 1024 /*
#define GRID_SIZE_2D DIM,DIM // */
//#define BLOCK_SIZE 128 /*
#define BLOCK_SIZE_2D 1,1 // */
/**
*
* CUDA UTILS
*
*/
#define cuda_try( ans ) { __cuda_try((ans), __FILE__, __LINE__); }
inline void __cuda_try( cudaError_t code, const char * file, int line, bool abort=true ) {
if (code != cudaSuccess) {
fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
*
* UTILS
*
*/
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE_2D
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE_2D
#endif
#define STR_EXPAND(...) #__VA_ARGS__
#define ARG(...) STR_EXPAND(__VA_ARGS__)
struct cppComplex {
float r;
float i;
__host__ __device__ cppComplex( float a, float b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void ) {
return r * r + i * i;
}
__host__ __device__ cppComplex operator *( const cppComplex& a ) {
return cppComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__host__ __device__ cppComplex operator +( const cppComplex& a ) {
return cppComplex(r + a.r, i + a.i);
}
};
int julia_cpu( int x, int y ) {
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
cppComplex c(JULIA_X, JULIA_Y);
cppComplex a(jx, jy);
int i = 0;
for(; i < 200; i ++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
void cpu_draw( unsigned char * pixels ) {
#ifdef VERBOSE
printf("cpu drawing...\n");
#endif
#ifdef PROFILE_CPU
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
for (int x = 0; x < DIM; ++x) {
for (int y = 0; y < DIM; ++ y) {
pixels[x + y * DIM] = 255 * julia_cpu(x, y);
}
}
#ifdef PROFILE_CPU
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
#ifdef VERBOSE
printf("cpu drawing complete\n");
#endif
}
__global__ void kernel( unsigned char * ptr, int thread_size ) {
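// Each thread renders a run of thread_size consecutive pixels starting at 'offset',
// advancing x (and wrapping to the next row) as it goes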
int t_id =
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
blockIdx.x
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
(blockIdx.x + blockIdx.y * gridDim.x)
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
* blockDim.x
+ threadIdx.x;
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
* (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x + threadIdx.x);
#endif
int offset = thread_size * t_id;
int i = 0;
cppComplex c(JULIA_X, JULIA_Y);
int x = (i + offset) % DIM;
int y = (i + offset) / DIM;
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
for(; i < thread_size && offset + i < DIM * DIM; i ++) {
cppComplex a(jx, jy);
int j = 0;
for(; j < 200; j ++){
a = a * a + c;
if (a.magnitude2() > 1000)
break;
}
if (j < 200)
ptr[offset + i] = 0;
else
ptr[offset + i] = 255;
x ++;
if (x == DIM) {
x = 0;
y ++;
jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
}
jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
}
}
void gpu_draw( unsigned char * gpu_pixels ) {
int n = DIM * DIM;
dim3 grid_dim(GRID_DIM);
dim3 block_dim(BLOCK_DIM);
int grid_size = grid_dim.x * grid_dim.y * grid_dim.z;
int block_size = block_dim.x * block_dim.y * block_dim.z;
int thread_size = (n + (grid_size * block_size - 1)) / (grid_size * block_size);
#ifdef VERBOSE
printf("gpu drawing...\n");
printf("problem size %d, grid dim "ARG(GRID_DIM)"=%d, block size "ARG(BLOCK_DIM)"=%d, thread size %d\n", n, grid_size, block_size, thread_size);
#endif
unsigned char * dev_bitmap;
cuda_try(cudaMalloc((void **)&dev_bitmap, n * sizeof(unsigned char)));
#ifdef PROFILE_GPU
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
kernel<<<grid_dim,block_dim>>>(dev_bitmap, thread_size);
#ifdef PROFILE_GPU
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
cuda_try(cudaPeekAtLastError());
cuda_try(cudaMemcpy(gpu_pixels, dev_bitmap, n * sizeof(unsigned char), cudaMemcpyDeviceToHost));
cuda_try(cudaFree(dev_bitmap));
#ifdef VERBOSE
printf("gpu drawing complete\n");
#endif
}
void draw_file( char * path, unsigned char * pixels ) {
FILE * f = fopen(path, "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y ++) {
for (int x = 0; x < DIM; x ++) {
#if !defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)], f);
fputc(0, f);
fputc(0, f);
#elif defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
#elif defined(WHITE) && !defined(PALE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
#else
#warning Make up your mind on the color!
#error You must choose either PALE, WHITE, or neither!
#endif
}
}
fclose(f);
}
int main( void ) {
#ifdef VERBOSE
printf("julia set of "ARG(JULIA_X)","ARG(JULIA_Y)" resolution "ARG(DIM)"*"ARG(DIM)" scale "ARG(SCALE)"\n");
#endif
#if defined(USE_CPU) || defined(PROFILE_CPU)
unsigned char * pixels = new unsigned char[DIM * DIM];
cpu_draw(pixels);
#if !defined(DRY_RUN)
draw_file(CPU_OUTPUT_FILE, pixels);
#endif
delete [] pixels;
#endif
#if defined(USE_GPU) || defined(PROFILE_GPU)
unsigned char *gpu_pixels = new unsigned char[DIM * DIM];
gpu_draw(gpu_pixels);
#if !defined(DRY_RUN)
draw_file(GPU_OUTPUT_FILE, gpu_pixels);
#endif
delete [] gpu_pixels;
#endif
}
|
05158cb5e57058ea3384ae035983693f9d6ad666.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
// specialize it based on type (specifically double) to avoid unaligned memory
// access compile errors
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
__global__ void vecAdd(double *a, double *b, double *c, int n) {
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Make sure we do not go out of bounds
double *smem = SharedMemory<double>();
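// Stage this thread's sum in dynamic shared memory before writing it back to global memory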
if (id < n) {
smem[threadIdx.x] = a[id] + b[id];
c[id] = smem[threadIdx.x];
}
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 1024;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 256;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// shared memory size in byte
int sharedmem = blockSize * sizeof(int);
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), sharedmem, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} | 05158cb5e57058ea3384ae035983693f9d6ad666.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
// specialize it based on type (specifically double) to avoid unaligned memory
// access compile errors
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
__global__ void vecAdd(double *a, double *b, double *c, int n) {
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Make sure we do not go out of bounds
double *smem = SharedMemory<double>();
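// Stage this thread's sum in dynamic shared memory before writing it back to global memory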
if (id < n) {
smem[threadIdx.x] = a[id] + b[id];
c[id] = smem[threadIdx.x];
}
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 1024;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 256;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
    // Shared memory size in bytes; smem holds one double per thread, so size by sizeof(double)
    int sharedmem = blockSize * sizeof(double);
// Execute the kernel
vecAdd<<<gridSize, blockSize, sharedmem>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
    // Sum up vector c and print the result divided by n; it should equal 1 to within floating-point error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
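// For reference, a minimal sketch of how the two launches above correspond (shown as
// comments, since only one dialect compiles in a given translation unit): hipify rewrites
// the CUDA triple-chevron launch into the hipLaunchKernelGGL macro, mapping grid, block,
// dynamic shared-memory bytes, and stream positionally.
//
//   CUDA: vecAdd<<<gridSize, blockSize, sharedmem>>>(d_a, d_b, d_c, n);
//   HIP : hipLaunchKernelGGL(vecAdd, dim3(gridSize), dim3(blockSize), sharedmem, 0,
//                            d_a, d_b, d_c, n);
//
// The trailing 0 in the HIP form is the stream argument, left implicit in the CUDA launch.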
mish_impl.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/mish_impl.cuh"
#include "include/hip/hip_fp16.h"
template <typename T>
__global__ void MishKernel(const size_t size, const T *input_addr, T *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
output_addr[pos] = input_addr[pos] * tanh(logf(1. + expf(input_addr[pos])));
}
}
template <>
__global__ void MishKernel(const size_t size, const half *input_addr, half *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
output_addr[pos] = __half2float(input_addr[pos]) * tanh(logf(1. + exp(__half2float(input_addr[pos]))));
}
}
template <>
__global__ void MishKernel(const size_t size, const double *input_addr, double *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
    output_addr[pos] = input_addr[pos] * tanh(log(1. + exp(input_addr[pos])));  // keep the double specialization in double precision
}
}
template <typename T>
void Mish(const size_t size, const T *input_addr, T *output_addr,
const uint32_t &device_id, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( MishKernel), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0,
cuda_stream, size, input_addr, output_addr);
}
template <>
void Mish(const size_t size, const half *input_addr, half *output_addr, const uint32_t &device_id,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( MishKernel<half>), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0,
cuda_stream, size, input_addr, output_addr);
}
template <>
void Mish(const size_t size, const double *input_addr, double *output_addr,
const uint32_t &device_id, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( MishKernel<double>), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0,
cuda_stream, size, input_addr, output_addr);
}
template CUDA_LIB_EXPORT void Mish<float>(const size_t size, const float *input_addr, float *output_addr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void Mish<half>(const size_t size, const half *input_addr, half *output_addr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void Mish<double>(const size_t size, const double *input_addr, double *output_addr,
const uint32_t &device_id, hipStream_t cuda_stream);
| mish_impl.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/mish_impl.cuh"
#include "include/cuda_fp16.h"
template <typename T>
__global__ void MishKernel(const size_t size, const T *input_addr, T *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
output_addr[pos] = input_addr[pos] * tanh(logf(1. + expf(input_addr[pos])));
}
}
template <>
__global__ void MishKernel(const size_t size, const half *input_addr, half *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
output_addr[pos] = __half2float(input_addr[pos]) * tanh(logf(1. + exp(__half2float(input_addr[pos]))));
}
}
template <>
__global__ void MishKernel(const size_t size, const double *input_addr, double *output_addr) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
    output_addr[pos] = input_addr[pos] * tanh(log(1. + exp(input_addr[pos])));  // keep the double specialization in double precision
}
}
template <typename T>
void Mish(const size_t size, const T *input_addr, T *output_addr,
const uint32_t &device_id, cudaStream_t cuda_stream) {
MishKernel<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0,
cuda_stream>>>(size, input_addr, output_addr);
}
template <>
void Mish(const size_t size, const half *input_addr, half *output_addr, const uint32_t &device_id,
cudaStream_t cuda_stream) {
MishKernel<half><<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0,
cuda_stream>>>(size, input_addr, output_addr);
}
template <>
void Mish(const size_t size, const double *input_addr, double *output_addr,
const uint32_t &device_id, cudaStream_t cuda_stream) {
MishKernel<double><<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0,
cuda_stream>>>(size, input_addr, output_addr);
}
template CUDA_LIB_EXPORT void Mish<float>(const size_t size, const float *input_addr, float *output_addr,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void Mish<half>(const size_t size, const half *input_addr, half *output_addr,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void Mish<double>(const size_t size, const double *input_addr, double *output_addr,
const uint32_t &device_id, cudaStream_t cuda_stream);
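// The kernels above compute the Mish activation, mish(x) = x * tanh(softplus(x))
// = x * tanh(ln(1 + e^x)). A minimal host-side reference for spot-checking kernel output;
// the function name is an assumption, not part of the original file.
#include <cmath>
static inline double mish_reference(double x) {
  // log1p(exp(x)) == ln(1 + e^x); log1p is slightly more accurate when exp(x) is small
  return x * std::tanh(std::log1p(std::exp(x)));
}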
|
98f2a411a1bf78b420815498e1750f8c251f031b.hip | // !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.cu"
#include "StochasticSpatialConvolution.cu"
#include "StochasticSpatialBatchNormalization.cu"
#include "StochasticSpatialMaxPooling.cu"
#include "StochasticSpatialAveragePooling.cu"
#include "StochasticThreshold.cu"
#include "StochasticSpatialSampling.cu"
LUA_EXTERNC DLL_EXPORT int luaopen_libstcunn(lua_State *L);
int luaopen_libstcunn(lua_State *L)
{
lua_newtable(L);
stcunn_StochasticSpatialConvolution_init(L);
stcunn_StochasticSpatialBatchNormalization_init(L);
stcunn_StochasticSpatialMaxPooling_init(L);
stcunn_StochasticSpatialAveragePooling_init(L);
stcunn_StochasticThreshold_init(L);
stcunn_StochasticSpatialSampling_init(L);
return 1;
}
| 98f2a411a1bf78b420815498e1750f8c251f031b.cu | #include "luaT.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.cu"
#include "StochasticSpatialConvolution.cu"
#include "StochasticSpatialBatchNormalization.cu"
#include "StochasticSpatialMaxPooling.cu"
#include "StochasticSpatialAveragePooling.cu"
#include "StochasticThreshold.cu"
#include "StochasticSpatialSampling.cu"
LUA_EXTERNC DLL_EXPORT int luaopen_libstcunn(lua_State *L);
int luaopen_libstcunn(lua_State *L)
{
lua_newtable(L);
stcunn_StochasticSpatialConvolution_init(L);
stcunn_StochasticSpatialBatchNormalization_init(L);
stcunn_StochasticSpatialMaxPooling_init(L);
stcunn_StochasticSpatialAveragePooling_init(L);
stcunn_StochasticThreshold_init(L);
stcunn_StochasticSpatialSampling_init(L);
return 1;
}
|
3d1ceb5003dc12415167d84747f851531eba071e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(hipFree(mAnchor[ii]));
}
CUDA_CHECK(hipHostFree(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2*k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2*k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i], output, numElem, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
        // call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 3d1ceb5003dc12415167d84747f851531eba071e.cu | #include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < IGNORE_THRESH) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char *)res_count + sizeof(float) + count * sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2*k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2*k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
CalDetection<<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[i], output, numElem, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount, outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
        // call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
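// CalDetection above applies the YOLOv5-style box decode. A minimal host-side sketch of the
// same math for one prediction (tx, ty, tw, th) in grid cell (col, row) with anchor (aw, ah);
// the function names are assumptions, not part of the original plugin.
static inline float sigmoid_ref(float x) { return 1.0f / (1.0f + expf(-x)); }
static inline void decode_box_ref(float tx, float ty, float tw, float th,
                                  int col, int row, int gridW, int gridH,
                                  float aw, float ah, float bbox[4]) {
    bbox[0] = (col - 0.5f + 2.0f * sigmoid_ref(tx)) * INPUT_W / gridW;  // box center x, pixels
    bbox[1] = (row - 0.5f + 2.0f * sigmoid_ref(ty)) * INPUT_H / gridH;  // box center y, pixels
    float w = 2.0f * sigmoid_ref(tw);
    float h = 2.0f * sigmoid_ref(th);
    bbox[2] = w * w * aw;   // width  = (2 * sigmoid(tw))^2 * anchor_w
    bbox[3] = h * h * ah;   // height = (2 * sigmoid(th))^2 * anchor_h
}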
|
2fa208c8db65ecc52bacd2688686cd175b573d5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cast_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
namespace {
template <typename DstType, typename SrcType>
__global__ void CastKernel(const int N, const SrcType* X, DstType* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
// Y[i] = static_cast<DstType>(X[i]);
Y[i] = convert::To<SrcType, DstType>(X[i]);
}
}
} // namespace
template <>
template <typename DstType, typename SrcType>
bool CastOp<CUDAContext>::DoRunWithType() {
auto& input = Input(0);
auto* output = Output(0);
output->ResizeLike(input);
const auto* data = input.template data<SrcType>();
auto* out = output->template mutable_data<DstType>();
DCHECK(input.size() < INT_MAX);
int N = input.size();
hipLaunchKernelGGL(( CastKernel<DstType, SrcType>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), N, data, out);
return true;
}
template <>
template <typename DstType>
bool CastOp<CUDAContext>::DoRunWithDstType() {
return DispatchHelper<
TensorTypes<
float,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
DstType>::call(this, Input(0));
}
// specific version that allows for casting to fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<float>() {
return DispatchHelper<
TensorTypes<
float,
float16,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
float /* DstType */>::call(this, Input(0));
}
// specific version for casting _from_ fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<float16>() {
return DispatchHelper<
TensorTypes<
float,
float16>,
float16 /* DstType */>::call(this, Input(0));
}
template <>
void CastOp<CUDAContext>::SetBody(TensorProto_DataType to) {
switch (to) {
case TensorProto_DataType_FLOAT:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<float>;
break;
case TensorProto_DataType_INT32:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int>;
break;
case TensorProto_DataType_BYTE:
LOG(FATAL) << "BYTE is deprecated";
break;
case TensorProto_DataType_STRING:
CAFFE_THROW("Casting to and from strings is not supported yet");
// break;
case TensorProto_DataType_BOOL:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<bool>;
break;
case TensorProto_DataType_UINT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint8_t>;
break;
case TensorProto_DataType_INT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int8_t>;
break;
case TensorProto_DataType_UINT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint16_t>;
break;
case TensorProto_DataType_INT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int16_t>;
break;
case TensorProto_DataType_INT64:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int64_t>;
break;
case TensorProto_DataType_FLOAT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<float16>;
break;
case TensorProto_DataType_DOUBLE:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<double>;
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("Cast op must have 'to' argument of type DataType");
// break;
default:
CAFFE_THROW("Unexpected 'to' argument value: ", to);
}
}
REGISTER_CUDA_OPERATOR(Cast, CastOp<CUDAContext>);
} // namespace caffe2
| 2fa208c8db65ecc52bacd2688686cd175b573d5d.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cast_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
namespace {
template <typename DstType, typename SrcType>
__global__ void CastKernel(const int N, const SrcType* X, DstType* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
// Y[i] = static_cast<DstType>(X[i]);
Y[i] = convert::To<SrcType, DstType>(X[i]);
}
}
} // namespace
template <>
template <typename DstType, typename SrcType>
bool CastOp<CUDAContext>::DoRunWithType() {
auto& input = Input(0);
auto* output = Output(0);
output->ResizeLike(input);
const auto* data = input.template data<SrcType>();
auto* out = output->template mutable_data<DstType>();
DCHECK(input.size() < INT_MAX);
int N = input.size();
CastKernel<DstType, SrcType><<<
CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(N, data, out);
return true;
}
template <>
template <typename DstType>
bool CastOp<CUDAContext>::DoRunWithDstType() {
return DispatchHelper<
TensorTypes<
float,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
DstType>::call(this, Input(0));
}
// specific version that allows for casting to fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<float>() {
return DispatchHelper<
TensorTypes<
float,
float16,
int32_t,
bool,
uint8_t,
int8_t,
uint16_t,
int16_t,
int64_t,
double>,
float /* DstType */>::call(this, Input(0));
}
// specific version for casting _from_ fp16
template <>
template <>
bool CastOp<CUDAContext>::DoRunWithDstType<float16>() {
return DispatchHelper<
TensorTypes<
float,
float16>,
float16 /* DstType */>::call(this, Input(0));
}
template <>
void CastOp<CUDAContext>::SetBody(TensorProto_DataType to) {
switch (to) {
case TensorProto_DataType_FLOAT:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<float>;
break;
case TensorProto_DataType_INT32:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int>;
break;
case TensorProto_DataType_BYTE:
LOG(FATAL) << "BYTE is deprecated";
break;
case TensorProto_DataType_STRING:
CAFFE_THROW("Casting to and from strings is not supported yet");
// break;
case TensorProto_DataType_BOOL:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<bool>;
break;
case TensorProto_DataType_UINT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint8_t>;
break;
case TensorProto_DataType_INT8:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int8_t>;
break;
case TensorProto_DataType_UINT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<uint16_t>;
break;
case TensorProto_DataType_INT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int16_t>;
break;
case TensorProto_DataType_INT64:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<int64_t>;
break;
case TensorProto_DataType_FLOAT16:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<float16>;
break;
case TensorProto_DataType_DOUBLE:
body_ = &CastOp<CUDAContext>::DoRunWithDstType<double>;
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("Cast op must have 'to' argument of type DataType");
// break;
default:
CAFFE_THROW("Unexpected 'to' argument value: ", to);
}
}
REGISTER_CUDA_OPERATOR(Cast, CastOp<CUDAContext>);
} // namespace caffe2
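// SetBody above stores a pointer-to-member-function chosen once from the destination dtype,
// which the operator presumably invokes later via (this->*body_)(). A minimal standalone
// sketch of that dispatch pattern, with hypothetical names:
struct DispatchSketch {
  bool (DispatchSketch::*body_)() = nullptr;
  bool RunAsFloat() { return true; }                    // stands in for DoRunWithDstType<float>
  void Set() { body_ = &DispatchSketch::RunAsFloat; }   // one-time choice, as in SetBody
  bool Run() { return (this->*body_)(); }               // cheap indirect call on every run
};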
|
2667a3eca33714407817fcb4aa7aad8b503fcebb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add_strided_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int xOffset = 1;
int yOffset = 1;
float *dx = NULL;
hipMalloc(&dx, XSIZE*YSIZE*sizeof(float));  // allocation size is in bytes, not element count
float *dy = NULL;
hipMalloc(&dy, XSIZE*YSIZE*sizeof(float));
int incx = 1;
int incy = 1;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
add_strided_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,xOffset,yOffset,dx,dy,incx,incy,result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
add_strided_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,xOffset,yOffset,dx,dy,incx,incy,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
add_strided_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,xOffset,yOffset,dx,dy,incx,incy,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2667a3eca33714407817fcb4aa7aad8b503fcebb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add_strided_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int xOffset = 1;
int yOffset = 1;
float *dx = NULL;
cudaMalloc(&dx, XSIZE*YSIZE*sizeof(float));  // allocation size is in bytes, not element count
float *dy = NULL;
cudaMalloc(&dy, XSIZE*YSIZE*sizeof(float));
int incx = 1;
int incy = 1;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
add_strided_float<<<gridBlock,threadBlock>>>(n,xOffset,yOffset,dx,dy,incx,incy,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
add_strided_float<<<gridBlock,threadBlock>>>(n,xOffset,yOffset,dx,dy,incx,incy,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
add_strided_float<<<gridBlock,threadBlock>>>(n,xOffset,yOffset,dx,dy,incx,incy,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
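// The timing loops above use std::chrono without a final device synchronize, so the tail of
// the queued launches can fall outside the measured interval. Below is a sketch using CUDA
// events, which bracket the GPU work itself; the helper name is an assumption, while the
// kernel signature matches the calls above.
static float time_add_strided_ms(dim3 grid, dim3 block, int n, int xOffset, int yOffset,
                                 float *dx, float *dy, int incx, int incy, float *result,
                                 int iters) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i)
        add_strided_float<<<grid, block>>>(n, xOffset, yOffset, dx, dy, incx, incy, result);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait for all queued launches to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}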
117b159a2aa878b97681f33f1bfba044c66007f4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "glm/ridge.h"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
#include "ml_utils.h"
namespace ML {
namespace GLM {
using namespace MLCommon;
template<typename T>
struct RidgeInputs {
T tol;
int n_row;
int n_col;
int n_row_2;
int algo;
T alpha;
};
template<typename T>
class RidgeTest: public ::testing::TestWithParam<RidgeInputs<T> > {
protected:
void basicTest() {
params = ::testing::TestWithParam<RidgeInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
int len2 = params.n_row_2 * params.n_col;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(data, len);
allocate(labels, params.n_row);
allocate(coef, params.n_col);
allocate(coef2, params.n_col);
allocate(coef3, params.n_col);
allocate(coef_ref, params.n_col);
allocate(coef2_ref, params.n_col);
allocate(coef3_ref, params.n_col);
allocate(pred_data, len2);
allocate(pred, params.n_row_2);
allocate(pred_ref, params.n_row_2);
allocate(pred2, params.n_row_2);
allocate(pred2_ref, params.n_row_2);
allocate(pred3, params.n_row_2);
allocate(pred3_ref, params.n_row_2);
T alpha = params.alpha;
T data_h[len] = { 0.0, 0.0, 1.0, 0.0, 0.0, 1.0 };
updateDevice(data, data_h, len, stream);
T labels_h[params.n_row] = { 0.0, 0.1, 1.0 };
updateDevice(labels, labels_h, params.n_row, stream);
T coef_ref_h[params.n_col] = { 0.39999998, 0.4 };
updateDevice(coef_ref, coef_ref_h, params.n_col, stream);
T coef2_ref_h[params.n_col] = { 0.3454546, 0.34545454 };
updateDevice(coef2_ref, coef2_ref_h, params.n_col, stream);
T coef3_ref_h[params.n_col] = { 0.3799999, 0.38000008 };
updateDevice(coef3_ref, coef3_ref_h, params.n_col, stream);
T pred_data_h[len2] = { 0.5, 2.0, 0.2, 1.0 };
updateDevice(pred_data, pred_data_h, len2, stream);
T pred_ref_h[params.n_row_2] = { 0.28, 1.1999999 };
updateDevice(pred_ref, pred_ref_h, params.n_row_2, stream);
T pred2_ref_h[params.n_row_2] = { 0.37818184, 1.1727273 };
updateDevice(pred2_ref, pred2_ref_h, params.n_row_2, stream);
T pred3_ref_h[params.n_row_2] = { 0.37933332, 1.2533332 };
updateDevice(pred3_ref, pred3_ref_h, params.n_row_2, stream);
intercept = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef,
&intercept, false, false, cublas_handle, cusolver_handle,
stream, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef, intercept,
pred, cublas_handle, stream);
updateDevice(data, data_h, len, stream);
updateDevice(labels, labels_h, params.n_row, stream);
intercept2 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef2,
&intercept2, true, false, cublas_handle, cusolver_handle,
stream, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef2, intercept2,
pred2, cublas_handle, stream);
updateDevice(data, data_h, len, stream);
updateDevice(labels, labels_h, params.n_row, stream);
intercept3 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef3,
&intercept3, true, true, cublas_handle, cusolver_handle,
stream, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef3, intercept3,
pred3, cublas_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void basicTest2() {
params = ::testing::TestWithParam<RidgeInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(data_sc, len);
allocate(labels_sc, len);
allocate(coef_sc, 1);
allocate(coef_sc_ref, 1);
std::vector<T> data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0};
data_h.resize(len);
updateDevice(data_sc, data_h.data(), len, stream);
std::vector<T> labels_h = {6.0, 8.0, 9.0, 11.0, -1.0, 2.0};
labels_h.resize(len);
updateDevice(labels_sc, labels_h.data(), len, stream);
std::vector<T> coef_sc_ref_h = {1.8};
coef_sc_ref_h.resize(1);
updateDevice(coef_sc_ref, coef_sc_ref_h.data(), 1, stream);
T intercept_sc = T(0);
T alpha_sc = T(1.0);
ridgeFit(data_sc, len, 1, labels_sc, &alpha_sc, 1, coef_sc,
&intercept_sc, true, false, cublas_handle, cusolver_handle,
stream, params.algo);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void SetUp() override {
basicTest();
basicTest2();
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
CUDA_CHECK(hipFree(coef_ref));
CUDA_CHECK(hipFree(coef2));
CUDA_CHECK(hipFree(coef2_ref));
CUDA_CHECK(hipFree(coef3));
CUDA_CHECK(hipFree(coef3_ref));
CUDA_CHECK(hipFree(pred_data));
CUDA_CHECK(hipFree(pred));
CUDA_CHECK(hipFree(pred_ref));
CUDA_CHECK(hipFree(pred2));
CUDA_CHECK(hipFree(pred2_ref));
CUDA_CHECK(hipFree(pred3));
CUDA_CHECK(hipFree(pred3_ref));
CUDA_CHECK(hipFree(data_sc));
CUDA_CHECK(hipFree(labels_sc));
CUDA_CHECK(hipFree(coef_sc));
CUDA_CHECK(hipFree(coef_sc_ref));
}
protected:
RidgeInputs<T> params;
T *data, *labels, *coef, *coef_ref, *pred_data, *pred, *pred_ref;
T *coef2, *coef2_ref, *pred2, *pred2_ref;
T *coef3, *coef3_ref, *pred3, *pred3_ref;
T *data_sc, *labels_sc, *coef_sc, *coef_sc_ref;
T intercept, intercept2, intercept3;
};
const std::vector<RidgeInputs<float> > inputsf2 = {
{ 0.001f, 3, 2, 2, 0, 0.5f },
{ 0.001f, 3, 2, 2, 1, 0.5f } };
const std::vector<RidgeInputs<double> > inputsd2 = {
{ 0.001, 3, 2, 2, 0, 0.5 },
{ 0.001, 3, 2, 2, 1, 0.5 } };
typedef RidgeTest<float> RidgeTestF;
TEST_P(RidgeTestF, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef_sc_ref, coef_sc, 1,
CompareApproxAbs<float>(params.tol)));
}
typedef RidgeTest<double> RidgeTestD;
TEST_P(RidgeTestD, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef_sc_ref, coef_sc, 1,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestD, ::testing::ValuesIn(inputsd2));
}
} // end namespace ML
| 117b159a2aa878b97681f33f1bfba044c66007f4.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "glm/ridge.h"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
#include "ml_utils.h"
namespace ML {
namespace GLM {
using namespace MLCommon;
template<typename T>
struct RidgeInputs {
T tol;
int n_row;
int n_col;
int n_row_2;
int algo;
T alpha;
};
template<typename T>
class RidgeTest: public ::testing::TestWithParam<RidgeInputs<T> > {
protected:
void basicTest() {
params = ::testing::TestWithParam<RidgeInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
int len2 = params.n_row_2 * params.n_col;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(data, len);
allocate(labels, params.n_row);
allocate(coef, params.n_col);
allocate(coef2, params.n_col);
allocate(coef3, params.n_col);
allocate(coef_ref, params.n_col);
allocate(coef2_ref, params.n_col);
allocate(coef3_ref, params.n_col);
allocate(pred_data, len2);
allocate(pred, params.n_row_2);
allocate(pred_ref, params.n_row_2);
allocate(pred2, params.n_row_2);
allocate(pred2_ref, params.n_row_2);
allocate(pred3, params.n_row_2);
allocate(pred3_ref, params.n_row_2);
T alpha = params.alpha;
T data_h[len] = { 0.0, 0.0, 1.0, 0.0, 0.0, 1.0 };
updateDevice(data, data_h, len, stream);
T labels_h[params.n_row] = { 0.0, 0.1, 1.0 };
updateDevice(labels, labels_h, params.n_row, stream);
T coef_ref_h[params.n_col] = { 0.39999998, 0.4 };
updateDevice(coef_ref, coef_ref_h, params.n_col, stream);
T coef2_ref_h[params.n_col] = { 0.3454546, 0.34545454 };
updateDevice(coef2_ref, coef2_ref_h, params.n_col, stream);
T coef3_ref_h[params.n_col] = { 0.3799999, 0.38000008 };
updateDevice(coef3_ref, coef3_ref_h, params.n_col, stream);
T pred_data_h[len2] = { 0.5, 2.0, 0.2, 1.0 };
updateDevice(pred_data, pred_data_h, len2, stream);
T pred_ref_h[params.n_row_2] = { 0.28, 1.1999999 };
updateDevice(pred_ref, pred_ref_h, params.n_row_2, stream);
T pred2_ref_h[params.n_row_2] = { 0.37818184, 1.1727273 };
updateDevice(pred2_ref, pred2_ref_h, params.n_row_2, stream);
T pred3_ref_h[params.n_row_2] = { 0.37933332, 1.2533332 };
updateDevice(pred3_ref, pred3_ref_h, params.n_row_2, stream);
intercept = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef,
&intercept, false, false, cublas_handle, cusolver_handle,
stream, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef, intercept,
pred, cublas_handle, stream);
updateDevice(data, data_h, len, stream);
updateDevice(labels, labels_h, params.n_row, stream);
intercept2 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef2,
&intercept2, true, false, cublas_handle, cusolver_handle,
stream, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef2, intercept2,
pred2, cublas_handle, stream);
updateDevice(data, data_h, len, stream);
updateDevice(labels, labels_h, params.n_row, stream);
intercept3 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef3,
&intercept3, true, true, cublas_handle, cusolver_handle,
stream, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef3, intercept3,
pred3, cublas_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void basicTest2() {
params = ::testing::TestWithParam<RidgeInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(data_sc, len);
allocate(labels_sc, len);
allocate(coef_sc, 1);
allocate(coef_sc_ref, 1);
std::vector<T> data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0};
data_h.resize(len);
updateDevice(data_sc, data_h.data(), len, stream);
std::vector<T> labels_h = {6.0, 8.0, 9.0, 11.0, -1.0, 2.0};
labels_h.resize(len);
updateDevice(labels_sc, labels_h.data(), len, stream);
std::vector<T> coef_sc_ref_h = {1.8};
coef_sc_ref_h.resize(1);
updateDevice(coef_sc_ref, coef_sc_ref_h.data(), 1, stream);
T intercept_sc = T(0);
T alpha_sc = T(1.0);
ridgeFit(data_sc, len, 1, labels_sc, &alpha_sc, 1, coef_sc,
&intercept_sc, true, false, cublas_handle, cusolver_handle,
stream, params.algo);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void SetUp() override {
basicTest();
basicTest2();
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
CUDA_CHECK(cudaFree(coef_ref));
CUDA_CHECK(cudaFree(coef2));
CUDA_CHECK(cudaFree(coef2_ref));
CUDA_CHECK(cudaFree(coef3));
CUDA_CHECK(cudaFree(coef3_ref));
CUDA_CHECK(cudaFree(pred_data));
CUDA_CHECK(cudaFree(pred));
CUDA_CHECK(cudaFree(pred_ref));
CUDA_CHECK(cudaFree(pred2));
CUDA_CHECK(cudaFree(pred2_ref));
CUDA_CHECK(cudaFree(pred3));
CUDA_CHECK(cudaFree(pred3_ref));
CUDA_CHECK(cudaFree(data_sc));
CUDA_CHECK(cudaFree(labels_sc));
CUDA_CHECK(cudaFree(coef_sc));
CUDA_CHECK(cudaFree(coef_sc_ref));
}
protected:
RidgeInputs<T> params;
T *data, *labels, *coef, *coef_ref, *pred_data, *pred, *pred_ref;
T *coef2, *coef2_ref, *pred2, *pred2_ref;
T *coef3, *coef3_ref, *pred3, *pred3_ref;
T *data_sc, *labels_sc, *coef_sc, *coef_sc_ref;
T intercept, intercept2, intercept3;
};
const std::vector<RidgeInputs<float> > inputsf2 = {
{ 0.001f, 3, 2, 2, 0, 0.5f },
{ 0.001f, 3, 2, 2, 1, 0.5f } };
const std::vector<RidgeInputs<double> > inputsd2 = {
{ 0.001, 3, 2, 2, 0, 0.5 },
{ 0.001, 3, 2, 2, 1, 0.5 } };
typedef RidgeTest<float> RidgeTestF;
TEST_P(RidgeTestF, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef_sc_ref, coef_sc, 1,
CompareApproxAbs<float>(params.tol)));
}
typedef RidgeTest<double> RidgeTestD;
TEST_P(RidgeTestD, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef_sc_ref, coef_sc, 1,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestD, ::testing::ValuesIn(inputsd2));
}
} // end namespace ML
|
607be3e9b2e8419685a0b1c36b2f5e9765b8b32f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
    Batches zgeadd of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
zgeadd_batched_kernel(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaDoubleComplex *dA = dAarray[ blockIdx.y ];
magmaDoubleComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaDoubleComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_zgeadd_batched(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
hipLaunchKernelGGL(( zgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
| 607be3e9b2e8419685a0b1c36b2f5e9765b8b32f.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
    Batches zgeadd of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
zgeadd_batched_kernel(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaDoubleComplex *dA = dAarray[ blockIdx.y ];
magmaDoubleComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaDoubleComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_zgeadd_batched(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
zgeadd_batched_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
|
73cbbb9bd0a78011c062ed641a8060283e7ee201.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void callOperation(int *a, int *b, int *c, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
if (a[tid] <= b[tid])
{
c[tid] = a[tid];
}
else
{
c[tid] = b[tid];
}
} | 73cbbb9bd0a78011c062ed641a8060283e7ee201.cu | #include "includes.h"
__global__ void callOperation(int *a, int *b, int *c, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
if (a[tid] <= b[tid])
{
c[tid] = a[tid];
}
else
{
c[tid] = b[tid];
}
} |
63fa76b2d05c1f4303ee65653f04f6d69250d818.hip | // !!! This is a file automatically generated by hipify!!!
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <stdlib.h>
//
//__global__ void unique_index_calc_threadIdx(int * data)
//{
// int tid = threadIdx.x;
// printf("threadIdx.x : %d - data : %d \n", tid, data[tid]);
//}
//
//
//__global__ void unique_gid_calculation(int * data)
//{
// int tid = threadIdx.x;
// int offset = blockIdx.x * blockDim.x;
// int gid = tid + offset;
//
// printf("blockIdx.x : %d, threadIdx.x : %d - data : %d \n",
// blockIdx.x, tid, data[gid]);
//}
//
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = { 23,9,4,53,65,12,1,33,22,43,56,1,76,81,94,32 };
//
// for (int i = 0; i < array_size; i++)
// {
// printf("%d ", h_data[i]);
// }
// printf("\n \n");
//
// int * d_data;
// hipMalloc((void**)&d_data, array_byte_size);
// hipMemcpy(d_data, h_data, array_byte_size, hipMemcpyHostToDevice);
//
// dim3 block(4);
// dim3 grid(2);
//
// unique_index_calc_threadIdx << < grid, block >> > (d_data);
// hipDeviceSynchronize();
//
// hipDeviceReset();
// return 0;
//} | 63fa76b2d05c1f4303ee65653f04f6d69250d818.cu | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <stdlib.h>
//
//__global__ void unique_index_calc_threadIdx(int * data)
//{
// int tid = threadIdx.x;
// printf("threadIdx.x : %d - data : %d \n", tid, data[tid]);
//}
//
//
//__global__ void unique_gid_calculation(int * data)
//{
// int tid = threadIdx.x;
// int offset = blockIdx.x * blockDim.x;
// int gid = tid + offset;
//
// printf("blockIdx.x : %d, threadIdx.x : %d - data : %d \n",
// blockIdx.x, tid, data[gid]);
//}
//
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = { 23,9,4,53,65,12,1,33,22,43,56,1,76,81,94,32 };
//
// for (int i = 0; i < array_size; i++)
// {
// printf("%d ", h_data[i]);
// }
// printf("\n \n");
//
// int * d_data;
// cudaMalloc((void**)&d_data, array_byte_size);
// cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
//
// dim3 block(4);
// dim3 grid(2);
//
// unique_index_calc_threadIdx << < grid, block >> > (d_data);
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//} |
c7e30796c183ace5ad93230635888cbde94c7455.hip | // !!! This is a file automatically generated by hipify!!!
/*----------------------------*/
/* ALGORITHMIC TOOLS CLASS */
/* - RAND.PERM. SUBCLASS - */
/* IMPLEMENTATION */
/*----------------------------*/
#include <iostream>
#include <algorithm>
#include "GlobalDeclarations.cuh"
#include "Algorithms.cuh"
using namespace std;
/*********************************
* DECLARATIONS *
* ____________ *
*********************************/
unsigned int* output_array = NULL;
/*********************************
* CONSTANT FIELDS *
* _______________ *
*********************************/
unsigned int static_values[9] = {
1, 2, 6, 5, 0, 4, 7, 3, 8
};
/*************************************
* MEMBER FUNCTIONS *
* ________________ *
*************************************/
/*---------------------------------------*/
/* Generate a set of random permutations */
/*---------------------------------------*/
unsigned int* Algorithms::RandomPermutation::generateRandPerm(unsigned int perm_size) {
#if STATIC_GENERATION
return static_values;
#else
random_shuffle(output_array, output_array + perm_size);
return output_array;
#endif
}
/*--------------------------------------*/
/* Set the size of the generator buffer */
/* to be used in the calculations. */
/*--------------------------------------*/
bool Algorithms::RandomPermutation::setGeneratorBuffer(unsigned int* b, int permsize) {
output_array = b;
// Initialize the given buffer space
for (int j = 0; j < permsize; j++) {
b[j] = j;
}
hipDeviceSynchronize();
return true;
} | c7e30796c183ace5ad93230635888cbde94c7455.cu | /*----------------------------*/
/* ALGORITHMIC TOOLS CLASS */
/* - RAND.PERM. SUBCLASS - */
/* IMPLEMENTATION */
/*----------------------------*/
#include <iostream>
#include <algorithm>
#include "GlobalDeclarations.cuh"
#include "Algorithms.cuh"
using namespace std;
/*********************************
* DECLARATIONS *
* ____________ *
*********************************/
unsigned int* output_array = NULL;
/*********************************
* CONSTANT FIELDS *
* _______________ *
*********************************/
unsigned int static_values[9] = {
1, 2, 6, 5, 0, 4, 7, 3, 8
};
/*************************************
* MEMBER FUNCTIONS *
* ________________ *
*************************************/
/*---------------------------------------*/
/* Generate a set of random permutations */
/*---------------------------------------*/
unsigned int* Algorithms::RandomPermutation::generateRandPerm(unsigned int perm_size) {
#if STATIC_GENERATION
return static_values;
#else
random_shuffle(output_array, output_array + perm_size);
return output_array;
#endif
}
/*--------------------------------------*/
/* Set the size of the generator buffer */
/* to be used in the calculations. */
/*--------------------------------------*/
bool Algorithms::RandomPermutation::setGeneratorBuffer(unsigned int* b, int permsize) {
output_array = b;
// Initialize the given buffer space
for (int j = 0; j < permsize; j++) {
b[j] = j;
}
cudaDeviceSynchronize();
return true;
} |
d39bb8b6c1e3b84d029be5cd8d5c84e71eb1ac79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2012-2014, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "splines/oskar_dierckx_bispev_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
static __global__ void oskar_set_zeros_f(float* out, int n, int stride);
static __global__ void oskar_set_zeros_d(double* out, int n, int stride);
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_dierckx_bispev_cuda_f(const float* d_tx, int nx,
const float* d_ty, int ny, const float* d_c, int kx, int ky,
int n, const float* d_x, const float* d_y, int stride, float* d_z)
{
/* Evaluate surface at the points by calling kernel. */
int num_blocks, num_threads = 256;
num_blocks = (n + num_threads - 1) / num_threads;
if (!d_tx || !d_ty || !d_c || nx == 0 || ny == 0)
{
oskar_set_zeros_f
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_z, n, stride);
}
else
{
oskar_dierckx_bispev_cudak_f
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_tx, nx, d_ty, ny,
d_c, kx, ky, n, d_x, d_y, stride, d_z);
}
}
/* Double precision. */
void oskar_dierckx_bispev_cuda_d(const double* d_tx, int nx,
const double* d_ty, int ny, const double* d_c, int kx, int ky,
int n, const double* d_x, const double* d_y, int stride, double* d_z)
{
/* Evaluate surface at the points by calling kernel. */
int num_blocks, num_threads = 256;
num_blocks = (n + num_threads - 1) / num_threads;
if (!d_tx || !d_ty || !d_c || nx == 0 || ny == 0)
{
oskar_set_zeros_d
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_z, n, stride);
}
else
{
oskar_dierckx_bispev_cudak_d
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_tx, nx, d_ty, ny,
d_c, kx, ky, n, d_x, d_y, stride, d_z);
}
}
/* Kernels and device functions. ========================================== */
/**
* @brief
* CUDA device function for fpbspl from DIERCKX library (single precision).
*
* @details
* CUDA device function to replace the fpbspl function from the DIERCKX
* fitting library.
*
* This routine evaluates the (k+1) non-zero b-splines of degree k
* at t(l) <= x < t(l+1) using the stable recurrence relation of
* de Boor and Cox.
*/
__device__
void oskar_cudaf_dierckx_fpbspl_f(const float *t, const int k,
const float x, const int l, float *h)
{
float f, hh[5];
int i, j, li, lj;
h[0] = 1.0f;
for (j = 1; j <= k; ++j)
{
for (i = 0; i < j; ++i)
{
hh[i] = h[i];
}
h[0] = 0.0f;
for (i = 0; i < j; ++i)
{
li = l + i;
lj = li - j;
f = hh[i] / (t[li] - t[lj]);
h[i] += f * (t[li] - x);
h[i + 1] = f * (x - t[lj]);
}
}
}
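/* Editor's note (added for clarity, not in the original source): the loop
 * above implements the Cox-de Boor recurrence
 *     B_{i,0}(x) = 1 if t_i <= x < t_{i+1}, else 0
 *     B_{i,j}(x) = (x - t_i)/(t_{i+j} - t_i) * B_{i,j-1}(x)
 *                + (t_{i+j+1} - x)/(t_{i+j+1} - t_{i+1}) * B_{i+1,j-1}(x),
 * restricted to the k+1 degree-k splines that are non-zero on
 * [t(l), t(l+1)); those k+1 values are returned in h[0..k]. */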
/**
* @brief
* CUDA device function for fpbspl from DIERCKX library (double precision).
*
* @details
* CUDA device function to replace the fpbspl function from the DIERCKX
* fitting library.
*
* This routine evaluates the (k+1) non-zero b-splines of degree k
* at t(l) <= x < t(l+1) using the stable recurrence relation of
* de Boor and Cox.
*/
__device__
void oskar_cudaf_dierckx_fpbspl_d(const double *t, const int k,
const double x, const int l, double *h)
{
double f, hh[5];
int i, j, li, lj;
h[0] = 1.0;
for (j = 1; j <= k; ++j)
{
for (i = 0; i < j; ++i)
{
hh[i] = h[i];
}
h[0] = 0.0;
for (i = 0; i < j; ++i)
{
li = l + i;
lj = li - j;
f = hh[i] / (t[li] - t[lj]);
h[i] += f * (t[li] - x);
h[i + 1] = f * (x - t[lj]);
}
}
}
/**
* @brief
* CUDA device function for fpbisp from DIERCKX library (single precision).
*
* @details
* CUDA device function to replace the fpbisp function from the DIERCKX
* fitting library.
*/
__device__
void oskar_cudaf_dierckx_fpbisp_single_f(const float *tx, const int nx,
const float *ty, const int ny, const float *c, const int kx,
const int ky, float x, float y, float *z)
{
int j, l, l1, l2, k1, nk1, lx;
float wx[6], wy[6], t;
/* Do x. */
k1 = kx + 1;
nk1 = nx - k1;
t = tx[kx];
if (x < t) x = t;
t = tx[nk1];
if (x > t) x = t;
l = k1;
while (!(x < tx[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_f(tx, kx, x, l, wx);
lx = l - k1;
/* Do y. */
k1 = ky + 1;
nk1 = ny - k1;
t = ty[ky];
if (y < t) y = t;
t = ty[nk1];
if (y > t) y = t;
l = k1;
while (!(y < ty[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_f(ty, ky, y, l, wy);
l1 = lx * nk1 + (l - k1);
/* Evaluate surface using coefficients. */
t = 0.0f;
for (l = 0; l <= kx; ++l)
{
l2 = l1;
for (j = 0; j <= ky; ++j)
{
t += c[l2] * wx[l] * wy[j];
++l2;
}
l1 += nk1;
}
*z = t;
}
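/* Editor's note (added for clarity, not in the original source): the two
 * loops above evaluate the tensor-product spline surface
 *     z(x, y) = sum_{l=0..kx} sum_{j=0..ky} c[(lx + l)*nk1 + ly + j] * wx[l] * wy[j],
 * with nk1 = ny - ky - 1 and ly the y knot-interval offset folded into l1;
 * only the (kx+1)*(ky+1) B-splines that are non-zero at (x, y) contribute. */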
/**
* @brief
* CUDA device function for fpbisp from DIERCKX library (double precision).
*
* @details
* CUDA device function to replace the fpbisp function from the DIERCKX
* fitting library.
*/
__device__
void oskar_cudaf_dierckx_fpbisp_single_d(const double *tx, const int nx,
const double *ty, const int ny, const double *c, const int kx,
const int ky, double x, double y, double *z)
{
int j, l, l1, l2, k1, nk1, lx;
double wx[6], wy[6], t;
/* Do x. */
k1 = kx + 1;
nk1 = nx - k1;
t = tx[kx];
if (x < t) x = t;
t = tx[nk1];
if (x > t) x = t;
l = k1;
while (!(x < tx[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_d(tx, kx, x, l, wx);
lx = l - k1;
/* Do y. */
k1 = ky + 1;
nk1 = ny - k1;
t = ty[ky];
if (y < t) y = t;
t = ty[nk1];
if (y > t) y = t;
l = k1;
while (!(y < ty[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_d(ty, ky, y, l, wy);
l1 = lx * nk1 + (l - k1);
/* Evaluate surface using coefficients. */
t = 0.0;
for (l = 0; l <= kx; ++l)
{
l2 = l1;
for (j = 0; j <= ky; ++j)
{
t += c[l2] * wx[l] * wy[j];
++l2;
}
l1 += nk1;
}
*z = t;
}
/* Single precision. */
__global__
void oskar_dierckx_bispev_cudak_f(const float* tx, const int nx,
const float* ty, const int ny, const float* c, const int kx,
const int ky, const int n, const float* x, const float* y,
const int stride, float* z)
{
/* Get the output position (pixel) ID that this thread is working on. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
/* Call device function to evaluate surface. */
oskar_cudaf_dierckx_fpbisp_single_f(tx, nx, ty, ny, c, kx, ky,
x[i], y[i], &z[i * stride]);
}
static __global__
void oskar_set_zeros_f(float* out, int n, int stride)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
out[i * stride] = 0.0f;
}
/* Double precision. */
__global__
void oskar_dierckx_bispev_cudak_d(const double* tx, const int nx,
const double* ty, const int ny, const double* c, const int kx,
const int ky, const int n, const double* x, const double* y,
const int stride, double* z)
{
/* Get the output position (pixel) ID that this thread is working on. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
/* Call device function to evaluate surface. */
oskar_cudaf_dierckx_fpbisp_single_d(tx, nx, ty, ny, c, kx, ky,
x[i], y[i], &z[i * stride]);
}
static __global__
void oskar_set_zeros_d(double* out, int n, int stride)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
out[i * stride] = 0.0;
}
#ifdef __cplusplus
}
#endif
| d39bb8b6c1e3b84d029be5cd8d5c84e71eb1ac79.cu | /*
* Copyright (c) 2012-2014, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "splines/oskar_dierckx_bispev_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
static __global__ void oskar_set_zeros_f(float* out, int n, int stride);
static __global__ void oskar_set_zeros_d(double* out, int n, int stride);
/* Kernel wrappers. ======================================================== */
/* Single precision. */
void oskar_dierckx_bispev_cuda_f(const float* d_tx, int nx,
const float* d_ty, int ny, const float* d_c, int kx, int ky,
int n, const float* d_x, const float* d_y, int stride, float* d_z)
{
/* Evaluate surface at the points by calling kernel. */
int num_blocks, num_threads = 256;
num_blocks = (n + num_threads - 1) / num_threads;
if (!d_tx || !d_ty || !d_c || nx == 0 || ny == 0)
{
oskar_set_zeros_f
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_z, n, stride);
}
else
{
oskar_dierckx_bispev_cudak_f
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_tx, nx, d_ty, ny,
d_c, kx, ky, n, d_x, d_y, stride, d_z);
}
}
/* Double precision. */
void oskar_dierckx_bispev_cuda_d(const double* d_tx, int nx,
const double* d_ty, int ny, const double* d_c, int kx, int ky,
int n, const double* d_x, const double* d_y, int stride, double* d_z)
{
/* Evaluate surface at the points by calling kernel. */
int num_blocks, num_threads = 256;
num_blocks = (n + num_threads - 1) / num_threads;
if (!d_tx || !d_ty || !d_c || nx == 0 || ny == 0)
{
oskar_set_zeros_d
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_z, n, stride);
}
else
{
oskar_dierckx_bispev_cudak_d
OSKAR_CUDAK_CONF(num_blocks, num_threads) (d_tx, nx, d_ty, ny,
d_c, kx, ky, n, d_x, d_y, stride, d_z);
}
}
/* Kernels and device functions. ========================================== */
/**
* @brief
* CUDA device function for fpbspl from DIERCKX library (single precision).
*
* @details
* CUDA device function to replace the fpbspl function from the DIERCKX
* fitting library.
*
* This routine evaluates the (k+1) non-zero b-splines of degree k
* at t(l) <= x < t(l+1) using the stable recurrence relation of
* de Boor and Cox.
*/
__device__
void oskar_cudaf_dierckx_fpbspl_f(const float *t, const int k,
const float x, const int l, float *h)
{
float f, hh[5];
int i, j, li, lj;
h[0] = 1.0f;
for (j = 1; j <= k; ++j)
{
for (i = 0; i < j; ++i)
{
hh[i] = h[i];
}
h[0] = 0.0f;
for (i = 0; i < j; ++i)
{
li = l + i;
lj = li - j;
f = hh[i] / (t[li] - t[lj]);
h[i] += f * (t[li] - x);
h[i + 1] = f * (x - t[lj]);
}
}
}
/**
* @brief
* CUDA device function for fpbspl from DIERCKX library (double precision).
*
* @details
* CUDA device function to replace the fpbspl function from the DIERCKX
* fitting library.
*
* This routine evaluates the (k+1) non-zero b-splines of degree k
* at t(l) <= x < t(l+1) using the stable recurrence relation of
* de Boor and Cox.
*/
__device__
void oskar_cudaf_dierckx_fpbspl_d(const double *t, const int k,
const double x, const int l, double *h)
{
double f, hh[5];
int i, j, li, lj;
h[0] = 1.0;
for (j = 1; j <= k; ++j)
{
for (i = 0; i < j; ++i)
{
hh[i] = h[i];
}
h[0] = 0.0;
for (i = 0; i < j; ++i)
{
li = l + i;
lj = li - j;
f = hh[i] / (t[li] - t[lj]);
h[i] += f * (t[li] - x);
h[i + 1] = f * (x - t[lj]);
}
}
}
/**
* @brief
* CUDA device function for fpbisp from DIERCKX library (single precision).
*
* @details
* CUDA device function to replace the fpbisp function from the DIERCKX
* fitting library.
*/
__device__
void oskar_cudaf_dierckx_fpbisp_single_f(const float *tx, const int nx,
const float *ty, const int ny, const float *c, const int kx,
const int ky, float x, float y, float *z)
{
int j, l, l1, l2, k1, nk1, lx;
float wx[6], wy[6], t;
/* Do x. */
k1 = kx + 1;
nk1 = nx - k1;
t = tx[kx];
if (x < t) x = t;
t = tx[nk1];
if (x > t) x = t;
l = k1;
while (!(x < tx[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_f(tx, kx, x, l, wx);
lx = l - k1;
/* Do y. */
k1 = ky + 1;
nk1 = ny - k1;
t = ty[ky];
if (y < t) y = t;
t = ty[nk1];
if (y > t) y = t;
l = k1;
while (!(y < ty[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_f(ty, ky, y, l, wy);
l1 = lx * nk1 + (l - k1);
/* Evaluate surface using coefficients. */
t = 0.0f;
for (l = 0; l <= kx; ++l)
{
l2 = l1;
for (j = 0; j <= ky; ++j)
{
t += c[l2] * wx[l] * wy[j];
++l2;
}
l1 += nk1;
}
*z = t;
}
/**
* @brief
* CUDA device function for fpbisp from DIERCKX library (double precision).
*
* @details
* CUDA device function to replace the fpbisp function from the DIERCKX
* fitting library.
*/
__device__
void oskar_cudaf_dierckx_fpbisp_single_d(const double *tx, const int nx,
const double *ty, const int ny, const double *c, const int kx,
const int ky, double x, double y, double *z)
{
int j, l, l1, l2, k1, nk1, lx;
double wx[6], wy[6], t;
/* Do x. */
k1 = kx + 1;
nk1 = nx - k1;
t = tx[kx];
if (x < t) x = t;
t = tx[nk1];
if (x > t) x = t;
l = k1;
while (!(x < tx[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_d(tx, kx, x, l, wx);
lx = l - k1;
/* Do y. */
k1 = ky + 1;
nk1 = ny - k1;
t = ty[ky];
if (y < t) y = t;
t = ty[nk1];
if (y > t) y = t;
l = k1;
while (!(y < ty[l] || l == nk1)) l++;
oskar_cudaf_dierckx_fpbspl_d(ty, ky, y, l, wy);
l1 = lx * nk1 + (l - k1);
/* Evaluate surface using coefficients. */
t = 0.0;
for (l = 0; l <= kx; ++l)
{
l2 = l1;
for (j = 0; j <= ky; ++j)
{
t += c[l2] * wx[l] * wy[j];
++l2;
}
l1 += nk1;
}
*z = t;
}
/* Single precision. */
__global__
void oskar_dierckx_bispev_cudak_f(const float* tx, const int nx,
const float* ty, const int ny, const float* c, const int kx,
const int ky, const int n, const float* x, const float* y,
const int stride, float* z)
{
/* Get the output position (pixel) ID that this thread is working on. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
/* Call device function to evaluate surface. */
oskar_cudaf_dierckx_fpbisp_single_f(tx, nx, ty, ny, c, kx, ky,
x[i], y[i], &z[i * stride]);
}
static __global__
void oskar_set_zeros_f(float* out, int n, int stride)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
out[i * stride] = 0.0f;
}
/* Double precision. */
__global__
void oskar_dierckx_bispev_cudak_d(const double* tx, const int nx,
const double* ty, const int ny, const double* c, const int kx,
const int ky, const int n, const double* x, const double* y,
const int stride, double* z)
{
/* Get the output position (pixel) ID that this thread is working on. */
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
/* Call device function to evaluate surface. */
oskar_cudaf_dierckx_fpbisp_single_d(tx, nx, ty, ny, c, kx, ky,
x[i], y[i], &z[i * stride]);
}
static __global__
void oskar_set_zeros_d(double* out, int n, int stride)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n) return;
out[i * stride] = 0.0;
}
#ifdef __cplusplus
}
#endif
|
c1297568cfb4e97262e4876922a4c8aac94c9cf8.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
 * to ensure data reuse; the matrix multiplication is done using a tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
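// Editor's note (illustrative, not part of the original sample): with
// BLOCK_SIZE = 32 and the default sizes wA = 320, wB = 640, each thread
// block produces one 32x32 tile of C; the a/b loop above makes
// wA / BLOCK_SIZE = 10 trips, each staging a 32x32 tile of A and of B in
// shared memory (2 * 32 * 32 * 4 bytes = 8 KiB) before accumulating the
// partial dot products.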
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
checkCudaErrors(hipEventCreate(&start));
hipEvent_t stop;
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
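  // Editor's note (illustrative, not part of the original sample): with the
  // default sizes in main() (A is 320x320, B is 320x640), flopsPerMatrixMul is
  // 2 * 320 * 320 * 640 = 131,072,000, i.e. about 0.13 GFlop per multiply, so
  // a hypothetical 1300 GFlop/s corresponds to roughly 0.1 msec per iteration.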
// Copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| c1297568cfb4e97262e4876922a4c8aac94c9cf8.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
 * to ensure data reuse; the matrix multiplication is done using a tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
checkCudaErrors(cudaEventCreate(&start));
cudaEvent_t stop;
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
709ae6d60db4e1e8194a1b4fd6088d956623a7be.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cfloat>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/fused/fused_bn_add_activation_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/norm_utils.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T>
class FusedBatchNormAddActKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::PreconditionNotMet("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
float momentum = ctx.Attr<float>("momentum");
std::string act_type = ctx.Attr<std::string>("act_type");
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
    epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
// Get the size for each dimension.
// NHWC [batch_size, in_height, in_width, in_channels]
const auto *x = ctx.Input<Tensor>("X");
const auto *z = ctx.Input<Tensor>("Z");
const auto &in_dims = x->dims();
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto *mean_out = ctx.Output<Tensor>("MeanOut");
auto *variance_out = ctx.Output<Tensor>("VarianceOut");
mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *saved_mean = ctx.Output<Tensor>("SavedMean");
auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
int N, C, H, W, D;
const DataLayout data_layout = DataLayout::kNHWC;
ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D);
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
// ------------------- cudnn descriptors ---------------------
auto handle = dev_ctx.cudnn_handle();
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
std::vector<int> dims = {N, C, H, W, D};
std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C};
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
double this_factor = 1. - momentum;
cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION;
platform::ScopedActivationDescriptor scope_act_desc;
cudnnActivationDescriptor_t activation_desc_ =
scope_act_desc.descriptor<T>(act_type);
size_t workspace_size = 0;
size_t reserve_space_size = 0;
void *reserve_space_ptr = nullptr;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
// Create reserve space and workspace for batch norm.
// Create tensor for each batchnorm op, it will be used in the
// backward. Thus this tensor shouldn't be temp.
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace");
PADDLE_ENFORCE_NOT_NULL(
reserve_space,
platform::errors::NotFound(
"The argument ReserveSpace of batch_norm op is not found."));
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*xDesc=*/data_desc_,
/*zDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/activation_desc_,
/*sizeInBytes=*/&workspace_size));
// -------------- cudnn batchnorm reserve space --------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*activationDesc=*/activation_desc_,
/*xDesc=*/data_desc_,
/*sizeInBytes=*/&reserve_space_size));
reserve_space_ptr = reserve_space->mutable_data(ctx.GetPlace(), x->type(),
reserve_space_size);
workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(),
workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTrainingEx(
handle, mode_, bnOps_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(),
data_desc_, z->template data<T>(), data_desc_,
y->template data<T>(), bn_param_desc_,
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr,
reserve_space_size));
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
}
};
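// Editor's note (added for clarity, not in the original source): with
// bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION, the single
// cudnnBatchNormalizationForwardTrainingEx call above fuses the whole forward
// pass, computing Y = act(batch_norm(X) + Z) in NHWC layout, where act is the
// activation selected by the "act_type" attribute.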
template <typename T>
class FusedBatchNormAddActGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::PreconditionNotMet("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
std::string act_type = ctx.Attr<std::string>("act_type");
const auto *x = ctx.Input<Tensor>("X");
const auto *z = ctx.Input<Tensor>("Z");
const auto *y = ctx.Input<Tensor>("Y");
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace");
const auto &in_dims = x->dims();
int N, C, H, W, D;
const DataLayout data_layout = DataLayout::kNHWC;
ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D);
// init output
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_z = ctx.Output<Tensor>(framework::GradVarName("Z"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
d_x->mutable_data<T>(ctx.GetPlace());
d_z->mutable_data<T>(ctx.GetPlace());
PADDLE_ENFORCE_EQ(
d_scale && d_bias, true,
platform::errors::PreconditionNotMet(
"Both the scale grad and the bias grad must not be null."));
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL,
platform::errors::PreconditionNotMet(
"The scale only has one dimension."));
PADDLE_ENFORCE_EQ(
scale->dims()[0], C,
platform::errors::PreconditionNotMet(
"The size of scale is equal to the channel of Input(X)."));
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
std::vector<int> dims = {N, C, H, W, D};
std::vector<int> strides = {H * W * C * D, 1, W * D * C, D * C, C};
// ------------------- cudnn descriptors ---------------------
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
    epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *saved_var = ctx.Input<Tensor>("SavedVariance");
const auto *saved_mean_data =
saved_mean->template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_var->template data<BatchNormParamType<T>>();
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION;
platform::ScopedActivationDescriptor scope_act_desc;
cudnnActivationDescriptor_t activation_desc_ =
scope_act_desc.descriptor<T>(act_type);
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/data_desc_,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/activation_desc_,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(),
workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/x->template data<T>(),
/*yDesc=*/data_desc_,
/*yData=*/y->template data<T>(),
/*dyDesc=*/data_desc_,
/*dyData=*/d_y->template data<T>(),
/*dzDesc=*/data_desc_,
/*dzData=*/d_z->template data<T>(),
/*dxDesc=*/data_desc_,
/*dxData=*/d_x->template data<T>(),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(),
/*bnBiasData=*/bias->template data<BatchNormParamType<T>>(),
/*dBnScaleData=*/d_scale->template data<BatchNormParamType<T>>(),
/*dBnBiasData=*/d_bias->template data<BatchNormParamType<T>>(),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
            /*activationDesc=*/activation_desc_,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
}
};
} // namespace operators
} // namespace paddle
#if CUDNN_VERSION >= 7401
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
fused_bn_add_activation,
ops::FusedBatchNormAddActKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(fused_bn_add_activation_grad,
ops::FusedBatchNormAddActGradKernel<
plat::CUDADeviceContext, plat::float16>);
#endif
| 709ae6d60db4e1e8194a1b4fd6088d956623a7be.cu | // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cfloat>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/fused/fused_bn_add_activation_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/norm_utils.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T>
class FusedBatchNormAddActKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::PreconditionNotMet("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
float momentum = ctx.Attr<float>("momentum");
std::string act_type = ctx.Attr<std::string>("act_type");
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
// Get the size for each dimension.
// NHWC [batch_size, in_height, in_width, in_channels]
const auto *x = ctx.Input<Tensor>("X");
const auto *z = ctx.Input<Tensor>("Z");
const auto &in_dims = x->dims();
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto *mean_out = ctx.Output<Tensor>("MeanOut");
auto *variance_out = ctx.Output<Tensor>("VarianceOut");
mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *saved_mean = ctx.Output<Tensor>("SavedMean");
auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
int N, C, H, W, D;
const DataLayout data_layout = DataLayout::kNHWC;
ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D);
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
// ------------------- cudnn descriptors ---------------------
auto handle = dev_ctx.cudnn_handle();
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
std::vector<int> dims = {N, C, H, W, D};
std::vector<int> strides = {H * W * D * C, 1, W * D * C, D * C, C};
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
double this_factor = 1. - momentum;
cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION;
platform::ScopedActivationDescriptor scope_act_desc;
cudnnActivationDescriptor_t activation_desc_ =
scope_act_desc.descriptor<T>(act_type);
size_t workspace_size = 0;
size_t reserve_space_size = 0;
void *reserve_space_ptr = nullptr;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
// Create reserve space and workspace for batch norm.
// Create tensor for each batchnorm op, it will be used in the
// backward. Thus this tensor shouldn't be temp.
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace");
PADDLE_ENFORCE_NOT_NULL(
reserve_space,
platform::errors::NotFound(
"The argument ReserveSpace of batch_norm op is not found."));
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*xDesc=*/data_desc_,
/*zDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/activation_desc_,
/*sizeInBytes=*/&workspace_size));
// -------------- cudnn batchnorm reserve space --------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*activationDesc=*/activation_desc_,
/*xDesc=*/data_desc_,
/*sizeInBytes=*/&reserve_space_size));
reserve_space_ptr = reserve_space->mutable_data(ctx.GetPlace(), x->type(),
reserve_space_size);
workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(),
workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTrainingEx(
handle, mode_, bnOps_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(),
data_desc_, z->template data<T>(), data_desc_,
y->template data<T>(), bn_param_desc_,
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
activation_desc_, workspace_ptr, workspace_size, reserve_space_ptr,
reserve_space_size));
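// The reserve space filled by this call is returned through the
// ReserveSpace output and must be passed unchanged to
// cudnnBatchNormalizationBackwardEx in the grad kernel below.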
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
}
};
template <typename T>
class FusedBatchNormAddActGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::PreconditionNotMet("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
std::string act_type = ctx.Attr<std::string>("act_type");
const auto *x = ctx.Input<Tensor>("X");
const auto *z = ctx.Input<Tensor>("Z");
const auto *y = ctx.Input<Tensor>("Y");
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace");
const auto &in_dims = x->dims();
int N, C, H, W, D;
const DataLayout data_layout = DataLayout::kNHWC;
ExtractNCWHD(in_dims, data_layout, &N, &C, &H, &W, &D);
// init output
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_z = ctx.Output<Tensor>(framework::GradVarName("Z"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
d_x->mutable_data<T>(ctx.GetPlace());
d_z->mutable_data<T>(ctx.GetPlace());
PADDLE_ENFORCE_EQ(
d_scale && d_bias, true,
platform::errors::PreconditionNotMet(
"Both the scale grad and the bias grad must not be null."));
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL,
platform::errors::PreconditionNotMet(
"The scale only has one dimension."));
PADDLE_ENFORCE_EQ(
scale->dims()[0], C,
platform::errors::PreconditionNotMet(
"The size of scale is equal to the channel of Input(X)."));
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
std::vector<int> dims = {N, C, H, W, D};
std::vector<int> strides = {H * W * C * D, 1, W * D * C, D * C, C};
// ------------------- cudnn descriptors ---------------------
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
in_dims.size() > 3 ? in_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *saved_var = ctx.Input<Tensor>("SavedVariance");
const auto *saved_mean_data =
saved_mean->template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_var->template data<BatchNormParamType<T>>();
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
cudnnBatchNormOps_t bnOps_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION;
platform::ScopedActivationDescriptor scope_act_desc;
cudnnActivationDescriptor_t activation_desc_ =
scope_act_desc.descriptor<T>(act_type);
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/data_desc_,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/activation_desc_,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(ctx.GetPlace(), x->type(),
workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/bnOps_,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/x->template data<T>(),
/*yDesc=*/data_desc_,
/*yData=*/y->template data<T>(),
/*dyDesc=*/data_desc_,
/*dyData=*/d_y->template data<T>(),
/*dzDesc=*/data_desc_,
/*dzData=*/d_z->template data<T>(),
/*dxDesc=*/data_desc_,
/*dxData=*/d_x->template data<T>(),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(),
/*bnBiasData=*/bias->template data<BatchNormParamType<T>>(),
/*dBnScaleData=*/d_scale->template data<BatchNormParamType<T>>(),
/*dBnBiasData=*/d_bias->template data<BatchNormParamType<T>>(),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/activation_desc_,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
}
};
} // namespace operators
} // namespace paddle
#if CUDNN_VERSION >= 7401
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
fused_bn_add_activation,
ops::FusedBatchNormAddActKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(fused_bn_add_activation_grad,
ops::FusedBatchNormAddActGradKernel<
plat::CUDADeviceContext, plat::float16>);
#endif
|
335414e58b704522507a3501ec17db53dd7d1abf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,GNU %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,MSVC %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
#include "Inputs/cuda.h"
// Check kernel handles are emitted for non-MSVC target but not for MSVC target.
// GNU: @[[HCKERN:ckernel]] = constant ptr @[[CSTUB:__device_stub__ckernel]], align 8
// GNU: @[[HNSKERN:_ZN2ns8nskernelEv]] = constant ptr @[[NSSTUB:_ZN2ns23__device_stub__nskernelEv]], align 8
// GNU: @[[HTKERN:_Z10kernelfuncIiEvv]] = linkonce_odr constant ptr @[[TSTUB:_Z25__device_stub__kernelfuncIiEvv]], comdat, align 8
// GNU: @[[HDKERN:_Z11kernel_declv]] = external constant ptr, align 8
// MSVC: @[[HCKERN:ckernel]] = dso_local constant ptr @[[CSTUB:__device_stub__ckernel]], align 8
// MSVC: @[[HNSKERN:"\?nskernel@ns@@YAXXZ.*"]] = dso_local constant ptr @[[NSSTUB:"\?__device_stub__nskernel@ns@@YAXXZ"]], align 8
// MSVC: @[[HTKERN:"\?\?\$kernelfunc@H@@YAXXZ.*"]] = linkonce_odr dso_local constant ptr @[[TSTUB:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ.*"]], comdat, align 8
// MSVC: @[[HDKERN:"\?kernel_decl@@YAXXZ.*"]] = external dso_local constant ptr, align 8
extern "C" __global__ void ckernel() {}
namespace ns {
__global__ void nskernel() {}
} // namespace ns
template<class T>
__global__ void kernelfunc() {}
__global__ void kernel_decl();
extern "C" void (*kernel_ptr)();
extern "C" void *void_ptr;
extern "C" void launch(void *kern);
// Device side kernel names
// CHECK: @[[CKERN:[0-9]*]] = {{.*}} c"ckernel\00"
// CHECK: @[[NSKERN:[0-9]*]] = {{.*}} c"_ZN2ns8nskernelEv\00"
// CHECK: @[[TKERN:[0-9]*]] = {{.*}} c"_Z10kernelfuncIiEvv\00"
// Non-template kernel stub functions
// CHECK: define{{.*}}@[[CSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HCKERN]]
// CHECK: define{{.*}}@[[NSSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HNSKERN]]
// Check kernel stub is called for triple chevron.
// CHECK-LABEL: define{{.*}}@fun1()
// CHECK: call void @[[CSTUB]]()
// CHECK: call void @[[NSSTUB]]()
// CHECK: call void @[[TSTUB]]()
// GNU: call void @[[DSTUB:_Z26__device_stub__kernel_declv]]()
// MSVC: call void @[[DSTUB:"\?__device_stub__kernel_decl@@YAXXZ"]]()
extern "C" void fun1(void) {
hipLaunchKernelGGL(( ckernel), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( ns::nskernel), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( kernelfunc<int>), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( kernel_decl), dim3(1), dim3(1), 0, 0, );
}
// Template kernel stub functions
// CHECK: define{{.*}}@[[TSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HTKERN]]
// Check declaration of stub function for external kernel.
// CHECK: declare{{.*}}@[[DSTUB]]
// Check kernel handle is used for passing the kernel as a function pointer.
// CHECK-LABEL: define{{.*}}@fun2()
// CHECK: call void @launch({{.*}}[[HCKERN]]
// CHECK: call void @launch({{.*}}[[HNSKERN]]
// CHECK: call void @launch({{.*}}[[HTKERN]]
// CHECK: call void @launch({{.*}}[[HDKERN]]
extern "C" void fun2() {
launch((void *)ckernel);
launch((void *)ns::nskernel);
launch((void *)kernelfunc<int>);
launch((void *)kernel_decl);
}
// Check kernel handle is used for assigning a kernel to a function pointer.
// CHECK-LABEL: define{{.*}}@fun3()
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8
// CHECK: store ptr @[[HCKERN]], ptr @void_ptr, align 8
// CHECK: store ptr @[[HCKERN]], ptr @void_ptr, align 8
extern "C" void fun3() {
kernel_ptr = ckernel;
kernel_ptr = &ckernel;
void_ptr = (void *)ckernel;
void_ptr = (void *)&ckernel;
}
// Check kernel stub is loaded from kernel handle when function pointer is
// used with triple chevron.
// CHECK-LABEL: define{{.*}}@fun4()
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr
// CHECK: call noundef i32 @{{.*hipConfigureCall}}
// CHECK: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8
// CHECK: %[[STUB:.*]] = load ptr, ptr %[[HANDLE]], align 8
// CHECK: call void %[[STUB]]()
extern "C" void fun4() {
kernel_ptr = ckernel;
hipLaunchKernelGGL(( kernel_ptr), dim3(1),dim3(1), 0, 0, );
}
// Check kernel handle is passed to a function.
// CHECK-LABEL: define{{.*}}@fun5()
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr
// CHECK: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8
// CHECK: call void @launch(ptr noundef %[[HANDLE]])
extern "C" void fun5() {
kernel_ptr = ckernel;
launch((void *)kernel_ptr);
}
// Check kernel handle is registered.
// CHECK-LABEL: define{{.*}}@__hip_register_globals
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HCKERN]]{{.*}}@[[CKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HNSKERN]]{{.*}}@[[NSKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HTKERN]]{{.*}}@[[TKERN]]
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}__device_stub
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}kernel_decl
| 335414e58b704522507a3501ec17db53dd7d1abf.cu | // RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,GNU %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefixes=CHECK,MSVC %s
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm %s \
// RUN: -aux-triple amdgcn-amd-amdhsa -fcuda-include-gpubinary \
// RUN: %t -o - -x hip\
// RUN: | FileCheck -check-prefix=NEG %s
#include "Inputs/cuda.h"
// Check kernel handles are emitted for non-MSVC target but not for MSVC target.
// GNU: @[[HCKERN:ckernel]] = constant ptr @[[CSTUB:__device_stub__ckernel]], align 8
// GNU: @[[HNSKERN:_ZN2ns8nskernelEv]] = constant ptr @[[NSSTUB:_ZN2ns23__device_stub__nskernelEv]], align 8
// GNU: @[[HTKERN:_Z10kernelfuncIiEvv]] = linkonce_odr constant ptr @[[TSTUB:_Z25__device_stub__kernelfuncIiEvv]], comdat, align 8
// GNU: @[[HDKERN:_Z11kernel_declv]] = external constant ptr, align 8
// MSVC: @[[HCKERN:ckernel]] = dso_local constant ptr @[[CSTUB:__device_stub__ckernel]], align 8
// MSVC: @[[HNSKERN:"\?nskernel@ns@@YAXXZ.*"]] = dso_local constant ptr @[[NSSTUB:"\?__device_stub__nskernel@ns@@YAXXZ"]], align 8
// MSVC: @[[HTKERN:"\?\?\$kernelfunc@H@@YAXXZ.*"]] = linkonce_odr dso_local constant ptr @[[TSTUB:"\?\?\$__device_stub__kernelfunc@H@@YAXXZ.*"]], comdat, align 8
// MSVC: @[[HDKERN:"\?kernel_decl@@YAXXZ.*"]] = external dso_local constant ptr, align 8
extern "C" __global__ void ckernel() {}
namespace ns {
__global__ void nskernel() {}
} // namespace ns
template<class T>
__global__ void kernelfunc() {}
__global__ void kernel_decl();
extern "C" void (*kernel_ptr)();
extern "C" void *void_ptr;
extern "C" void launch(void *kern);
// Device side kernel names
// CHECK: @[[CKERN:[0-9]*]] = {{.*}} c"ckernel\00"
// CHECK: @[[NSKERN:[0-9]*]] = {{.*}} c"_ZN2ns8nskernelEv\00"
// CHECK: @[[TKERN:[0-9]*]] = {{.*}} c"_Z10kernelfuncIiEvv\00"
// Non-template kernel stub functions
// CHECK: define{{.*}}@[[CSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HCKERN]]
// CHECK: define{{.*}}@[[NSSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HNSKERN]]
// Check kernel stub is called for triple chevron.
// CHECK-LABEL: define{{.*}}@fun1()
// CHECK: call void @[[CSTUB]]()
// CHECK: call void @[[NSSTUB]]()
// CHECK: call void @[[TSTUB]]()
// GNU: call void @[[DSTUB:_Z26__device_stub__kernel_declv]]()
// MSVC: call void @[[DSTUB:"\?__device_stub__kernel_decl@@YAXXZ"]]()
extern "C" void fun1(void) {
ckernel<<<1, 1>>>();
ns::nskernel<<<1, 1>>>();
kernelfunc<int><<<1, 1>>>();
kernel_decl<<<1, 1>>>();
}
// Template kernel stub functions
// CHECK: define{{.*}}@[[TSTUB]]
// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[HTKERN]]
// Check declaration of stub function for external kernel.
// CHECK: declare{{.*}}@[[DSTUB]]
// Check kernel handle is used for passing the kernel as a function pointer.
// CHECK-LABEL: define{{.*}}@fun2()
// CHECK: call void @launch({{.*}}[[HCKERN]]
// CHECK: call void @launch({{.*}}[[HNSKERN]]
// CHECK: call void @launch({{.*}}[[HTKERN]]
// CHECK: call void @launch({{.*}}[[HDKERN]]
extern "C" void fun2() {
launch((void *)ckernel);
launch((void *)ns::nskernel);
launch((void *)kernelfunc<int>);
launch((void *)kernel_decl);
}
// Check kernel handle is used for assigning a kernel to a function pointer.
// CHECK-LABEL: define{{.*}}@fun3()
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr, align 8
// CHECK: store ptr @[[HCKERN]], ptr @void_ptr, align 8
// CHECK: store ptr @[[HCKERN]], ptr @void_ptr, align 8
extern "C" void fun3() {
kernel_ptr = ckernel;
kernel_ptr = &ckernel;
void_ptr = (void *)ckernel;
void_ptr = (void *)&ckernel;
}
// Check kernel stub is loaded from kernel handle when function pointer is
// used with triple chevron.
// CHECK-LABEL: define{{.*}}@fun4()
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr
// CHECK: call noundef i32 @{{.*hipConfigureCall}}
// CHECK: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8
// CHECK: %[[STUB:.*]] = load ptr, ptr %[[HANDLE]], align 8
// CHECK: call void %[[STUB]]()
extern "C" void fun4() {
kernel_ptr = ckernel;
kernel_ptr<<<1,1>>>();
}
// Check kernel handle is passed to a function.
// CHECK-LABEL: define{{.*}}@fun5()
// CHECK: store ptr @[[HCKERN]], ptr @kernel_ptr
// CHECK: %[[HANDLE:.*]] = load ptr, ptr @kernel_ptr, align 8
// CHECK: call void @launch(ptr noundef %[[HANDLE]])
extern "C" void fun5() {
kernel_ptr = ckernel;
launch((void *)kernel_ptr);
}
// Check kernel handle is registered.
// CHECK-LABEL: define{{.*}}@__hip_register_globals
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HCKERN]]{{.*}}@[[CKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HNSKERN]]{{.*}}@[[NSKERN]]
// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[HTKERN]]{{.*}}@[[TKERN]]
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}__device_stub
// NEG-NOT: call{{.*}}@__hipRegisterFunction{{.*}}kernel_decl
|
ec8912f86fe29b543c4a15a9cca3c0263629581e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <hip/driver_types.h>
void checkparams(unsigned long *n, unsigned int *cb);
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
#define _INT_
#ifdef _INT_
typedef int basetype; // Element type: int
#define labelelem "ints"
#elif _DOUBLE_
typedef double basetype; // Element type: double
#define labelelem "doubles"
#else
typedef float basetype; // Element type: float (DEFAULT)
#define labelelem "floats"
#endif
__constant__ unsigned long d_n ;
__constant__ basetype d_C = 10;
const unsigned long N = 134217728 * 4 ; // Default number of elements in the vectors
const int CUDA_BLK = 64; // Default CUDA thread-block size
// Definition of our kernel for the cuadradoV function
__global__ void constV_kernel_cuda(basetype *const cV){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < d_n)
cV[global_id] = cV[global_id]*d_C;
}
int main(int argc, char *argv[]){
// Number of elements in the vector (default: N 1048576)
unsigned long n = (argc > 1)?atoi (argv[1]):N;
// Number of threads per CUDA block (default: CUDA_BLK 64)
unsigned int cb = (argc > 2)?atoi (argv[2]):CUDA_BLK;
checkparams(&n, &cb);
unsigned int numBytes = n * sizeof(basetype);
unsigned int i;
basetype *vectorV = (basetype *) malloc(numBytes);
// Allocate global memory on the device (GPU) for the array and copy it over
double timetick;
hipError_t error;
basetype *cV;
for(i = 0; i < n; i++) {
vectorV[i] = (basetype)i;
}
hipMalloc((void **) &cV, numBytes);
hipMemcpy(cV, vectorV, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpyToSymbol(d_n, &n, sizeof(unsigned long) ); // CPU -> GPU
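// d_n and d_C live in __constant__ memory: d_n is set with the MemcpyToSymbol
// above, d_C keeps its static initializer (10), and every thread of the
// kernel reads them through the constant cache.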
// One-dimensional block of threads (*cb* threads)
dim3 dimBlock(cb);
// One-dimensional grid (*ceil(n/cb)* blocks)
dim3 dimGrid((n + dimBlock.x - 1) / dimBlock.x);
timetick = dwalltime();
hipLaunchKernelGGL(( constV_kernel_cuda), dim3(dimGrid), dim3(dimBlock), 0, 0, cV);
hipDeviceSynchronize();
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
error = hipGetLastError();
// Movemos resultado: GPU -> CPU
timetick = dwalltime();
hipMemcpy(vectorV, cV, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);
for (i = 0; i < 20; i++){
printf("%d|",vectorV[i]);
}
printf("\n");
unsigned long temp = n-1;
for (i = 0; i < 20; i++){
printf("%d|",vectorV[temp]);
temp--;
}
printf("\n%lu",n);
printf("\n");
printf("%d\n",error);
// Free the device global memory that was used
hipFree (cV);
free(vectorV);
}
// Function that adjusts the number of threads, blocks, and threads per block
// according to the GPU's limits
void checkparams(unsigned long *n, unsigned int *cb){
struct hipDeviceProp_t capabilities;
// If the total number of threads is less than the block size, shrink the block
if (*cb > *n)
*cb = *n;
hipGetDeviceProperties (&capabilities, 0);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
printf("->Nm. hilos/bloq cambiado a %d (mx por bloque para dev)\n\n",
*cb);
}
if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
*cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
printf("->Nm. hilos/bloq cambiado a %d (mx por bloque para dev)\n",
*cb);
if (*n > (capabilities.maxGridSize[0] * *cb)) {
*n = capabilities.maxGridSize[0] * *cb;
printf("->Nm. total de hilos cambiado a %lu (mx por grid para \
dev)\n\n", *n);
} else {
printf("\n");
}
} else {
printf("->Nm. hilos/bloq cambiado a %d (%d mx. bloq/grid para \
dev)\n\n",
*cb, capabilities.maxGridSize[0]);
}
}
}
| ec8912f86fe29b543c4a15a9cca3c0263629581e.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <driver_types.h>
void checkparams(unsigned long *n, unsigned int *cb);
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
#define _INT_
#ifdef _INT_
typedef int basetype; // Element type: int
#define labelelem "ints"
#elif _DOUBLE_
typedef double basetype; // Element type: double
#define labelelem "doubles"
#else
typedef float basetype; // Element type: float (DEFAULT)
#define labelelem "floats"
#endif
__constant__ unsigned long d_n ;
__constant__ basetype d_C = 10;
const unsigned long N = 134217728 * 4 ; // Default number of elements in the vectors
const int CUDA_BLK = 64; // Default CUDA thread-block size
// Definition of our kernel for the cuadradoV function
__global__ void constV_kernel_cuda(basetype *const cV){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < d_n)
cV[global_id] = cV[global_id]*d_C;
}
int main(int argc, char *argv[]){
// Number of elements in the vector (default: N 1048576)
unsigned long n = (argc > 1)?atoi (argv[1]):N;
// Number of threads per CUDA block (default: CUDA_BLK 64)
unsigned int cb = (argc > 2)?atoi (argv[2]):CUDA_BLK;
checkparams(&n, &cb);
unsigned int numBytes = n * sizeof(basetype);
unsigned int i;
basetype *vectorV = (basetype *) malloc(numBytes);
// Allocate global memory on the device (GPU) for the array and copy it over
double timetick;
cudaError_t error;
basetype *cV;
for(i = 0; i < n; i++) {
vectorV[i] = (basetype)i;
}
cudaMalloc((void **) &cV, numBytes);
cudaMemcpy(cV, vectorV, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpyToSymbol(d_n, &n, sizeof(unsigned long) ); // CPU -> GPU
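// d_n and d_C live in __constant__ memory: d_n is set with the MemcpyToSymbol
// above, d_C keeps its static initializer (10), and every thread of the
// kernel reads them through the constant cache.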
// One-dimensional block of threads (*cb* threads)
dim3 dimBlock(cb);
// One-dimensional grid (*ceil(n/cb)* blocks)
dim3 dimGrid((n + dimBlock.x - 1) / dimBlock.x);
timetick = dwalltime();
constV_kernel_cuda<<<dimGrid, dimBlock>>>(cV);
cudaThreadSynchronize();
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
error = cudaGetLastError();
// Movemos resultado: GPU -> CPU
timetick = dwalltime();
cudaMemcpy(vectorV, cV, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);
for (i = 0; i < 20; i++){
printf("%d|",vectorV[i]);
}
printf("\n");
unsigned long temp = n-1;
for (i = 0; i < 20; i++){
printf("%d|",vectorV[temp]);
temp--;
}
printf("\n%lu",n);
printf("\n");
printf("%d\n",error);
// Free the device global memory that was used
cudaFree (cV);
free(vectorV);
}
// Function that adjusts the number of threads, blocks, and threads per block
// according to the GPU's limits
void checkparams(unsigned long *n, unsigned int *cb){
struct cudaDeviceProp capabilities;
// If the total number of threads is less than the block size, shrink the block
if (*cb > *n)
*cb = *n;
cudaGetDeviceProperties (&capabilities, 0);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n\n",
*cb);
}
if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
*cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n",
*cb);
if (*n > (capabilities.maxGridSize[0] * *cb)) {
*n = capabilities.maxGridSize[0] * *cb;
printf("->Núm. total de hilos cambiado a %lu (máx por grid para \
dev)\n\n", *n);
} else {
printf("\n");
}
} else {
printf("->Núm. hilos/bloq cambiado a %d (%d máx. bloq/grid para \
dev)\n\n",
*cb, capabilities.maxGridSize[0]);
}
}
}
|
20b309418c9edc097abf617ae040b5481fdb0eff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
STREAM benchmark implementation in CUDA.
COPY: a(i) = b(i)
SCALE: a(i) = q*b(i)
SUM: a(i) = b(i) + c(i)
TRIAD: a(i) = b(i) + q*c(i)
It measures the memory system on the device.
The implementation is in single precision.
Code based on the code developed by John D. McCalpin
http://www.cs.virginia.edu/stream/FTP/Code/stream.c
Written by: Massimiliano Fatica, NVIDIA Corporation
Further modifications by: Ben Cumming, CSCS
*/
#define NTIMES 20
#include <string>
#include <vector>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
typedef double real;
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
void print_help()
{
printf(
"Usage: stream [-s] [-n <elements>] [-b <blocksize>]\n\n"
" -s\n"
" Print results in SI units (by default IEC units are used)\n\n"
" -n <elements>\n"
" Put <elements> values in the arrays\n"
" (defaults to 1<<26)\n\n"
" -b <blocksize>\n"
" Use <blocksize> as the number of threads in each block\n"
" (defaults to 192)\n"
);
}
void parse_options(int argc, char** argv, bool& SI, int& N, int& blockSize)
{
// Default values
SI = false;
N = 1<<26;
blockSize = 192;
int c;
while ((c = getopt (argc, argv, "sn:b:h")) != -1)
switch (c)
{
case 's':
SI = true;
break;
case 'n':
N = std::atoi(optarg);
break;
case 'b':
blockSize = std::atoi(optarg);
break;
case 'h':
print_help();
std::exit(0);
break;
default:
print_help();
std::exit(1);
}
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
template <typename T>
__global__ void set_array(T *a, T value, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
a[idx] = value;
}
template <typename T>
__global__ void STREAM_Copy(T *a, T *b, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
b[idx] = a[idx];
}
template <typename T>
__global__ void STREAM_Scale(T *a, T *b, T scale, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
b[idx] = scale* a[idx];
}
template <typename T>
__global__ void STREAM_Add( T *a, T *b, T *c, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
c[idx] = a[idx]+b[idx];
}
template <typename T>
__global__ void STREAM_Triad( T *a, T *b, T *c, T scalar, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
c[idx] = a[idx]+scalar*b[idx];
}
int main(int argc, char** argv)
{
real *d_a, *d_b, *d_c;
int j,k;
double times[4][NTIMES];
real scalar;
std::vector<std::string> label{"Copy: ", "Scale: ", "Add: ", "Triad: "};
// Parse arguments
bool SI;
int N, blockSize;
parse_options(argc, argv, SI, N, blockSize);
printf(" STREAM Benchmark implementation in CUDA\n");
printf(" Array size (%s precision) =%7.2f MB\n", sizeof(double)==sizeof(real)?"double":"single", double(N)*double(sizeof(real))/1.e6);
/* Allocate memory on device */
hipMalloc((void**)&d_a, sizeof(real)*N);
hipMalloc((void**)&d_b, sizeof(real)*N);
hipMalloc((void**)&d_c, sizeof(real)*N);
/* Compute execution configuration */
dim3 dimBlock(blockSize);
dim3 dimGrid(N/dimBlock.x );
if( N % dimBlock.x != 0 ) dimGrid.x+=1;
printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x);
if (SI)
printf(" output in SI units (KB = 1000 B)\n");
else
printf(" output in IEC units (KiB = 1024 B)\n");
/* Initialize memory on the device */
hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, 2.f, N);
hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, .5f, N);
hipLaunchKernelGGL(( set_array<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_c, .5f, N);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar=3.0f;
for (k=0; k<NTIMES; k++)
{
times[0][k]= mysecond();
hipLaunchKernelGGL(( STREAM_Copy<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_c, N);
hipDeviceSynchronize();
times[0][k]= mysecond() - times[0][k];
times[1][k]= mysecond();
hipLaunchKernelGGL(( STREAM_Scale<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, scalar, N);
hipDeviceSynchronize();
times[1][k]= mysecond() - times[1][k];
times[2][k]= mysecond();
hipLaunchKernelGGL(( STREAM_Add<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_b, d_c, N);
hipDeviceSynchronize();
times[2][k]= mysecond() - times[2][k];
times[3][k]= mysecond();
hipLaunchKernelGGL(( STREAM_Triad<real>), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, d_c, d_a, scalar, N);
hipDeviceSynchronize();
times[3][k]= mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
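// Bytes moved per kernel: Copy and Scale read one vector and write another
// (2 arrays of N reals), Add and Triad read two and write one (3 arrays);
// these factors drive the bandwidth numbers printed below.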
double bytes[4] = {
2 * sizeof(real) * (double)N,
2 * sizeof(real) * (double)N,
3 * sizeof(real) * (double)N,
3 * sizeof(real) * (double)N
};
// Use right units
const double G = SI ? 1.e9 : static_cast<double>(1<<30);
printf("\nFunction Rate %s Avg time(s) Min time(s) Max time(s)\n",
SI ? "(GB/s) " : "(GiB/s)" );
printf("-----------------------------------------------------------------\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%11.4f %11.8f %11.8f %11.8f\n", label[j].c_str(),
bytes[j]/mintime[j] / G,
avgtime[j],
mintime[j],
maxtime[j]);
}
/* Free memory on device */
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
| 20b309418c9edc097abf617ae040b5481fdb0eff.cu | /*
STREAM benchmark implementation in CUDA.
COPY: a(i) = b(i)
SCALE: a(i) = q*b(i)
SUM: a(i) = b(i) + c(i)
TRIAD: a(i) = b(i) + q*c(i)
It measures the memory system on the device.
The implementation is in single precision.
Code based on the code developed by John D. McCalpin
http://www.cs.virginia.edu/stream/FTP/Code/stream.c
Written by: Massimiliano Fatica, NVIDIA Corporation
Further modifications by: Ben Cumming, CSCS
*/
#define NTIMES 20
#include <string>
#include <vector>
#include <stdio.h>
#include <float.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
typedef double real;
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
void print_help()
{
printf(
"Usage: stream [-s] [-n <elements>] [-b <blocksize>]\n\n"
" -s\n"
" Print results in SI units (by default IEC units are used)\n\n"
" -n <elements>\n"
" Put <elements> values in the arrays\n"
" (defaults to 1<<26)\n\n"
" -b <blocksize>\n"
" Use <blocksize> as the number of threads in each block\n"
" (defaults to 192)\n"
);
}
void parse_options(int argc, char** argv, bool& SI, int& N, int& blockSize)
{
// Default values
SI = false;
N = 1<<26;
blockSize = 192;
int c;
while ((c = getopt (argc, argv, "sn:b:h")) != -1)
switch (c)
{
case 's':
SI = true;
break;
case 'n':
N = std::atoi(optarg);
break;
case 'b':
blockSize = std::atoi(optarg);
break;
case 'h':
print_help();
std::exit(0);
break;
default:
print_help();
std::exit(1);
}
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
template <typename T>
__global__ void set_array(T *a, T value, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
a[idx] = value;
}
template <typename T>
__global__ void STREAM_Copy(T *a, T *b, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
b[idx] = a[idx];
}
template <typename T>
__global__ void STREAM_Scale(T *a, T *b, T scale, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
b[idx] = scale* a[idx];
}
template <typename T>
__global__ void STREAM_Add( T *a, T *b, T *c, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
c[idx] = a[idx]+b[idx];
}
template <typename T>
__global__ void STREAM_Triad( T *a, T *b, T *c, T scalar, int len)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len)
c[idx] = a[idx]+scalar*b[idx];
}
int main(int argc, char** argv)
{
real *d_a, *d_b, *d_c;
int j,k;
double times[4][NTIMES];
real scalar;
std::vector<std::string> label{"Copy: ", "Scale: ", "Add: ", "Triad: "};
// Parse arguments
bool SI;
int N, blockSize;
parse_options(argc, argv, SI, N, blockSize);
printf(" STREAM Benchmark implementation in CUDA\n");
printf(" Array size (%s precision) =%7.2f MB\n", sizeof(double)==sizeof(real)?"double":"single", double(N)*double(sizeof(real))/1.e6);
/* Allocate memory on device */
cudaMalloc((void**)&d_a, sizeof(real)*N);
cudaMalloc((void**)&d_b, sizeof(real)*N);
cudaMalloc((void**)&d_c, sizeof(real)*N);
/* Compute execution configuration */
dim3 dimBlock(blockSize);
dim3 dimGrid(N/dimBlock.x );
if( N % dimBlock.x != 0 ) dimGrid.x+=1;
printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x);
if (SI)
printf(" output in SI units (KB = 1000 B)\n");
else
printf(" output in IEC units (KiB = 1024 B)\n");
/* Initialize memory on the device */
set_array<real><<<dimGrid,dimBlock>>>(d_a, 2.f, N);
set_array<real><<<dimGrid,dimBlock>>>(d_b, .5f, N);
set_array<real><<<dimGrid,dimBlock>>>(d_c, .5f, N);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar=3.0f;
for (k=0; k<NTIMES; k++)
{
times[0][k]= mysecond();
STREAM_Copy<real><<<dimGrid,dimBlock>>>(d_a, d_c, N);
cudaThreadSynchronize();
times[0][k]= mysecond() - times[0][k];
times[1][k]= mysecond();
STREAM_Scale<real><<<dimGrid,dimBlock>>>(d_b, d_c, scalar, N);
cudaThreadSynchronize();
times[1][k]= mysecond() - times[1][k];
times[2][k]= mysecond();
STREAM_Add<real><<<dimGrid,dimBlock>>>(d_a, d_b, d_c, N);
cudaThreadSynchronize();
times[2][k]= mysecond() - times[2][k];
times[3][k]= mysecond();
STREAM_Triad<real><<<dimGrid,dimBlock>>>(d_b, d_c, d_a, scalar, N);
cudaThreadSynchronize();
times[3][k]= mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
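// Bytes moved per kernel: Copy and Scale read one vector and write another
// (2 arrays of N reals), Add and Triad read two and write one (3 arrays);
// these factors drive the bandwidth numbers printed below.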
double bytes[4] = {
2 * sizeof(real) * (double)N,
2 * sizeof(real) * (double)N,
3 * sizeof(real) * (double)N,
3 * sizeof(real) * (double)N
};
// Use right units
const double G = SI ? 1.e9 : static_cast<double>(1<<30);
printf("\nFunction Rate %s Avg time(s) Min time(s) Max time(s)\n",
SI ? "(GB/s) " : "(GiB/s)" );
printf("-----------------------------------------------------------------\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%11.4f %11.8f %11.8f %11.8f\n", label[j].c_str(),
bytes[j]/mintime[j] / G,
avgtime[j],
mintime[j],
maxtime[j]);
}
/* Free memory on device */
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
57566fdc48ef0f3e390c8e0939cd6472fbb3d51d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
//#include <hip/hip_runtime.h>
#include "util.h"
// TODO CUDA kernel implementing axpy
// y = y + alpha*x
//void axpy(int n, double alpha, const double *x, double* y)
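// A minimal sketch of such a kernel (not part of the original exercise
// skeleton): one thread handles one element, with a bounds check so any
// launch configuration that covers n is safe.
__global__ void axpy(int n, double alpha, const double *x, double* y)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if(i < n) {
        y[i] += alpha*x[i];
    }
}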
template <typename F>
int calculate_threads_per_block(F kernel, int n) {
int block_dim, min_grid_size;
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_dim, kernel, 0, n);
std::cout << "++++ suggested block_dim " << block_dim
<< " and " << min_grid_size << " blocks"
<< std::endl;
return block_dim;
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 16);
size_t n = 1 << pow;
auto size_in_bytes = n * sizeof(double);
hipInit(0);
std::cout << "memcopy and daxpy test of size " << n << std::endl;
double* x_device = malloc_device<double>(n);
double* y_device = malloc_device<double>(n);
double* x_host = malloc_host<double>(n, 1.5);
double* y_host = malloc_host<double>(n, 3.0);
double* y = malloc_host<double>(n, 0.0);
// copy to device
auto start = get_time();
copy_to_device<double>(x_host, x_device, n);
copy_to_device<double>(y_host, y_device, n);
auto time_H2D = get_time() - start;
// TODO calculate grid dimensions
// ignore for the first kernel writing exercise
// the hipDeviceSynchronize() functions synchronize the host and device
// so that the timings are accurate
hipDeviceSynchronize();
start = get_time();
// TODO launch kernel (alpha=2.0)
hipDeviceSynchronize();
auto time_axpy = get_time() - start;
// check for error in last kernel call
cuda_check_last_kernel("axpy kernel");
// copy result back to host
start = get_time();
copy_to_host<double>(y_device, y, n);
auto time_D2H = get_time() - start;
std::cout << "-------\ntimings\n-------" << std::endl;
std::cout << "H2D : " << time_H2D << " s" << std::endl;
std::cout << "D2H : " << time_D2H << " s" << std::endl;
std::cout << "axpy : " << time_axpy << " s" << std::endl;
std::cout << std::endl;
std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s" << std::endl;
std::cout << std::endl;
std::cout << "-------\nbandwidth\n-------" << std::endl;
auto H2D_BW = size_in_bytes * 2.0 / time_H2D / (1024*1024);
auto D2H_BW = size_in_bytes / time_D2H / (1024*1024);
std::cout << "H2D BW : " << H2D_BW << " MB/s" << std::endl;
std::cout << "D2H BW : " << D2H_BW << " MB/s" << std::endl;
// check for errors
auto errors = 0;
#pragma omp parallel for reduction(+:errors)
for(auto i=0; i<n; ++i) {
if(std::fabs(6.-y[i])>1e-15) {
errors++;
}
}
if(errors>0) {
std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
}
else {
std::cout << "\n============ PASSED" << std::endl;
}
hipFree(x_device);
hipFree(y_device);
free(x_host);
free(y_host);
free(y);
return 0;
}
| 57566fdc48ef0f3e390c8e0939cd6472fbb3d51d.cu | #include <iostream>
#include <cuda.h>
//#include <cuda_runtime.h>
#include "util.h"
// TODO CUDA kernel implementing axpy
// y = y + alpha*x
//void axpy(int n, double alpha, const double *x, double* y)
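// A minimal sketch of such a kernel (not part of the original exercise
// skeleton): one thread handles one element, with a bounds check so any
// launch configuration that covers n is safe.
__global__ void axpy(int n, double alpha, const double *x, double* y)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if(i < n) {
        y[i] += alpha*x[i];
    }
}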
template <typename F>
int calculate_threads_per_block(F kernel, int n) {
int block_dim, min_grid_size;
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_dim, kernel, 0, n);
std::cout << "++++ suggested block_dim " << block_dim
<< " and " << min_grid_size << " blocks"
<< std::endl;
return block_dim;
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 16);
size_t n = 1 << pow;
auto size_in_bytes = n * sizeof(double);
cuInit(0);
std::cout << "memcopy and daxpy test of size " << n << std::endl;
double* x_device = malloc_device<double>(n);
double* y_device = malloc_device<double>(n);
double* x_host = malloc_host<double>(n, 1.5);
double* y_host = malloc_host<double>(n, 3.0);
double* y = malloc_host<double>(n, 0.0);
// copy to device
auto start = get_time();
copy_to_device<double>(x_host, x_device, n);
copy_to_device<double>(y_host, y_device, n);
auto time_H2D = get_time() - start;
// TODO calculate grid dimensions
// ignore for the first kernel writing exercise
// the cudaThreadSynchronize() functions synchronize the host and device
// so that the timings are accurate
cudaThreadSynchronize();
start = get_time();
// TODO launch kernel (alpha=2.0)
cudaThreadSynchronize();
auto time_axpy = get_time() - start;
// check for error in last kernel call
cuda_check_last_kernel("axpy kernel");
// copy result back to host
start = get_time();
copy_to_host<double>(y_device, y, n);
auto time_D2H = get_time() - start;
std::cout << "-------\ntimings\n-------" << std::endl;
std::cout << "H2D : " << time_H2D << " s" << std::endl;
std::cout << "D2H : " << time_D2H << " s" << std::endl;
std::cout << "axpy : " << time_axpy << " s" << std::endl;
std::cout << std::endl;
std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s" << std::endl;
std::cout << std::endl;
std::cout << "-------\nbandwidth\n-------" << std::endl;
auto H2D_BW = size_in_bytes * 2.0 / time_H2D / (1024*1024);
auto D2H_BW = size_in_bytes / time_D2H / (1024*1024);
std::cout << "H2D BW : " << H2D_BW << " MB/s" << std::endl;
std::cout << "D2H BW : " << D2H_BW << " MB/s" << std::endl;
// check for errors
auto errors = 0;
#pragma omp parallel for reduction(+:errors)
for(auto i=0; i<n; ++i) {
if(std::fabs(6.-y[i])>1e-15) {
errors++;
}
}
if(errors>0) {
std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
}
else {
std::cout << "\n============ PASSED" << std::endl;
}
cudaFree(x_device);
cudaFree(y_device);
free(x_host);
free(y_host);
free(y);
return 0;
}
|
bf0673cb0168948a32f9b9ef488e24315b07312d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "alloc3d_gpu.h"
#include "func.h"
#include "print.h"
#include <omp.h>
__global__ void jacobi_v3_dv0(double *d_u, double *d_uOld, double *d1_uOld, double *d_f, \
double frac, double delta2, int N, int N2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (0<k && k<(N-1) && 0<j && j<(N-1) && 0<i) {
if (i == (N/2-1)) {
d_u[i*N2 + j*N + k] = frac* ( \
d_uOld[(i-1)*N2 + j*N + k] + \
d1_uOld[j*N + k] + \
d_uOld[i*N2 + (j-1)*N + k] + \
d_uOld[i*N2 + (j+1)*N + k] + \
d_uOld[i*N2 + j*N + k-1] + \
d_uOld[i*N2 + j*N + k+1] + \
delta2 * d_f[i*N2 + j*N + k]);
} else if (i < (N/2-1)) {
d_u[i*N2 + j*N + k] =frac* ( \
d_uOld[(i-1)*N2+ j*N + k] + \
d_uOld[(i+1)*N2 + j*N + k] + \
d_uOld[i*N2 + (j-1)*N + k] + \
d_uOld[i*N2 + (j+1)*N + k] + \
d_uOld[i*N2 + j*N + k-1] + \
d_uOld[i*N2 + j*N + k+1] + \
delta2 * d_f[i*N2 + j*N + k]);
}
}
}
__global__ void jacobi_v3_dv1(double *d1_u, double *d1_uOld, double *d_uOld, double *d1_f, \
double frac, double delta2, int N, int N2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (0<k && k<(N-1) && 0<j && j<(N-1) && i<((N/2)-1)) {
if (i == 0) {
d1_u[i*N2 + j*N + k] = frac* ( \
d_uOld[(N/2-1)*N2 +j*N + k] + \
d1_uOld[(i+1)*N2 +j*N + k] + \
d1_uOld[i*N2 + (j-1)*N + k] + \
d1_uOld[i*N2 + (j+1)*N + k] + \
d1_uOld[i*N2 + j*N + k-1] + \
d1_uOld[i*N2 + j*N + k+1] + \
delta2 * d1_f[i*N2 + j*N + k]);
}
else if (i > 0) {
d1_u[i*N2 + j*N + k] = frac* ( \
d1_uOld[(i-1)*N2 + j*N + k] + \
d1_uOld[(i+1)*N2 + j*N + k] + \
d1_uOld[i*N2 + (j-1)*N + k] + \
d1_uOld[i*N2 + (j+1)*N + k] + \
d1_uOld[i*N2 + j*N + k-1] + \
d1_uOld[i*N2 + j*N + k+1] + \
delta2 * d1_f[i*N2 + j*N + k]);
}
}
}
int main(int argc, char *argv[]){
int N = atoi(argv[1]);
int iter_max = atoi(argv[2]);
double start_T = atof(argv[3]);
int output_type = 4;
char *output_prefix = "poisson_j_gpu1";
char *output_ext = "";
char output_filename[FILENAME_MAX];
int N2 = N * N;
// Wake up gpu
hipSetDevice(0);
double *d_dummy;
hipMalloc((void**)&d_dummy,0);
hipSetDevice(1);
hipMalloc((void**)&d_dummy,0);
double *d_u, *d_uOld, *d_f, *d1_u, *d1_uOld, *d1_f;
double *h_u, *h_uOld, *h_f;
int size = N * N * N * sizeof(double);
int half_size = size/2;
// Pinning memory in host
hipHostMalloc((void**)&h_u, size);
hipHostMalloc((void**)&h_uOld, size);
hipHostMalloc((void**)&h_f, size);
// Initialization of the arrays
u_init(h_u, N, N2, start_T);
u_init(h_uOld, N, N2, start_T);
f_init(h_f, N, N2);
// Device 0
hipSetDevice(0);
// Device memory allocation
hipMalloc((void**)&d_u, half_size);
hipMalloc((void**)&d_uOld, half_size);
hipMalloc((void**)&d_f, half_size);
// Copy initializationf from host to device
hipMemcpy(d_u, h_u, half_size, hipMemcpyHostToDevice);
hipMemcpy(d_uOld, h_uOld, half_size, hipMemcpyHostToDevice);
hipMemcpy(d_f, h_f, half_size, hipMemcpyHostToDevice);
// Device 1
hipSetDevice(1);
// Device memory allocation
hipMalloc((void**)&d1_u, half_size);
hipMalloc((void**)&d1_uOld, half_size);
hipMalloc((void**)&d1_f, half_size);
// Copy initializationf from host to device
hipMemcpy(d1_u, h_u + N*N*N/2, half_size, hipMemcpyHostToDevice);
hipMemcpy(d1_uOld, h_uOld + N*N*N/2, half_size, hipMemcpyHostToDevice);
hipMemcpy(d1_f, h_f + N*N*N/2, half_size, hipMemcpyHostToDevice);
// Enable peer access
hipSetDevice(0);
hipDeviceEnablePeerAccess(1,0);
hipSetDevice(1);
hipDeviceEnablePeerAccess(0,0);
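// With peer access enabled, each kernel dereferences the other GPU's uOld
// buffer directly at the domain split (d1_uOld inside jacobi_v3_dv0, d_uOld
// inside jacobi_v3_dv1), so no explicit halo copy through the host is needed
// between iterations.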
// kernel settings
dim3 blocksize(8,8,8);
dim3 gridsize( ceil((int) N/blocksize.x),ceil((int) N/blocksize.y),ceil((int) N/blocksize.z) );
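// Note: N/blocksize.x is integer division, so the ceil() calls have no
// rounding-up effect; the grid covers the full domain only when N is a
// multiple of the 8-point block edge.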
// Jacobi max iterations loop in host
double frac = 1.0/6.0;
double delta2 = (2.0*2.0)/N2;
int it = 0;
double ts = omp_get_wtime();
while(it < iter_max){
swap(&d_uOld, &d_u);
swap(&d1_uOld, &d1_u);
hipSetDevice(0);
hipLaunchKernelGGL(( jacobi_v3_dv0), dim3(gridsize),dim3(blocksize), 0, 0, d_u, d_uOld, d1_uOld, d_f, frac, delta2, N, N2);
hipSetDevice(1);
hipLaunchKernelGGL(( jacobi_v3_dv1), dim3(gridsize),dim3(blocksize), 0, 0, d1_u, d1_uOld, d_uOld, d1_f, frac, delta2, N, N2);
hipDeviceSynchronize();
hipSetDevice(0);
hipDeviceSynchronize();
it++;
}
double te = omp_get_wtime() - ts;
// Copy back to host
hipSetDevice(0);
hipMemcpy(h_u, d_u, half_size, hipMemcpyDeviceToHost);
hipSetDevice(1);
hipMemcpy(h_u + N*N*N/2, d1_u, half_size, hipMemcpyDeviceToHost);
// Disable Peer Access
hipSetDevice(0);
hipDeviceDisablePeerAccess(1);
hipSetDevice(1);
hipDeviceDisablePeerAccess(0);
// dump results if wanted
switch(output_type) {
case 0:
// no output at all
break;
case 4:
output_ext = ".vtk";
sprintf(output_filename, "%s_%d%s", output_prefix, N, output_ext);
//fprintf(stderr, "Write VTK file to %s: ", output_filename);
print_vtk(output_filename, N, h_u);
break;
default:
fprintf(stderr, "Non-supported output type!\n");
break;
}
// Calculate effective bandwidth
double efBW = N*N*N*sizeof(double)*4*it/te/1e3;
// 4 -> read uold, f | read and write u
// Calculate it/s
double itpersec = it/te;
double kbytes = N*N*N*sizeof(double)*3/1000.0;
//print info
printf("%d %d %3.6f %3.6f %3.6f %3.6f\n", N, it, te, itpersec, kbytes, efBW);
//Free host and device memory
hipHostFree(h_f);
hipHostFree(h_u);
hipHostFree(h_uOld);
hipFree(d_f);
hipFree(d_u);
hipFree(d_uOld);
hipFree(d1_f);
hipFree(d1_u);
hipFree(d1_uOld);
return(0);
}
| bf0673cb0168948a32f9b9ef488e24315b07312d.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "alloc3d_gpu.h"
#include "func.h"
#include "print.h"
#include <omp.h>
__global__ void jacobi_v3_dv0(double *d_u, double *d_uOld, double *d1_uOld, double *d_f, \
double frac, double delta2, int N, int N2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (0<k && k<(N-1) && 0<j && j<(N-1) && 0<i) {
if (i == (N/2-1)) {
d_u[i*N2 + j*N + k] = frac* ( \
d_uOld[(i-1)*N2 + j*N + k] + \
d1_uOld[j*N + k] + \
d_uOld[i*N2 + (j-1)*N + k] + \
d_uOld[i*N2 + (j+1)*N + k] + \
d_uOld[i*N2 + j*N + k-1] + \
d_uOld[i*N2 + j*N + k+1] + \
delta2 * d_f[i*N2 + j*N + k]);
} else if (i < (N/2-1)) {
d_u[i*N2 + j*N + k] =frac* ( \
d_uOld[(i-1)*N2+ j*N + k] + \
d_uOld[(i+1)*N2 + j*N + k] + \
d_uOld[i*N2 + (j-1)*N + k] + \
d_uOld[i*N2 + (j+1)*N + k] + \
d_uOld[i*N2 + j*N + k-1] + \
d_uOld[i*N2 + j*N + k+1] + \
delta2 * d_f[i*N2 + j*N + k]);
}
}
}
__global__ void jacobi_v3_dv1(double *d1_u, double *d1_uOld, double *d_uOld, double *d1_f, \
double frac, double delta2, int N, int N2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (0<k && k<(N-1) && 0<j && j<(N-1) && i<((N/2)-1)) {
if (i == 0) {
d1_u[i*N2 + j*N + k] = frac* ( \
d_uOld[(N/2-1)*N2 +j*N + k] + \
d1_uOld[(i+1)*N2 +j*N + k] + \
d1_uOld[i*N2 + (j-1)*N + k] + \
d1_uOld[i*N2 + (j+1)*N + k] + \
d1_uOld[i*N2 + j*N + k-1] + \
d1_uOld[i*N2 + j*N + k+1] + \
delta2 * d1_f[i*N2 + j*N + k]);
}
else if (i > 0) {
d1_u[i*N2 + j*N + k] = frac* ( \
d1_uOld[(i-1)*N2 + j*N + k] + \
d1_uOld[(i+1)*N2 + j*N + k] + \
d1_uOld[i*N2 + (j-1)*N + k] + \
d1_uOld[i*N2 + (j+1)*N + k] + \
d1_uOld[i*N2 + j*N + k-1] + \
d1_uOld[i*N2 + j*N + k+1] + \
delta2 * d1_f[i*N2 + j*N + k]);
}
}
}
int main(int argc, char *argv[]){
int N = atoi(argv[1]);
int iter_max = atoi(argv[2]);
double start_T = atof(argv[3]);
int output_type = 4;
char *output_prefix = "poisson_j_gpu1";
char *output_ext = "";
char output_filename[FILENAME_MAX];
int N2 = N * N;
// Wake up gpu
cudaSetDevice(0);
double *d_dummy;
cudaMalloc((void**)&d_dummy,0);
cudaSetDevice(1);
cudaMalloc((void**)&d_dummy,0);
double *d_u, *d_uOld, *d_f, *d1_u, *d1_uOld, *d1_f;
double *h_u, *h_uOld, *h_f;
int size = N * N * N * sizeof(double);
int half_size = size/2;
// Pinning memory in host
cudaMallocHost((void**)&h_u, size);
cudaMallocHost((void**)&h_uOld, size);
cudaMallocHost((void**)&h_f, size);
// Initialization of the arrays
u_init(h_u, N, N2, start_T);
u_init(h_uOld, N, N2, start_T);
f_init(h_f, N, N2);
// Device 0
cudaSetDevice(0);
// Device memory allocation
cudaMalloc((void**)&d_u, half_size);
cudaMalloc((void**)&d_uOld, half_size);
cudaMalloc((void**)&d_f, half_size);
// Copy initializationf from host to device
cudaMemcpy(d_u, h_u, half_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_uOld, h_uOld, half_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_f, h_f, half_size, cudaMemcpyHostToDevice);
// Device 1
cudaSetDevice(1);
// Device memory allocation
cudaMalloc((void**)&d1_u, half_size);
cudaMalloc((void**)&d1_uOld, half_size);
cudaMalloc((void**)&d1_f, half_size);
// Copy initializationf from host to device
cudaMemcpy(d1_u, h_u + N*N*N/2, half_size, cudaMemcpyHostToDevice);
cudaMemcpy(d1_uOld, h_uOld + N*N*N/2, half_size, cudaMemcpyHostToDevice);
cudaMemcpy(d1_f, h_f + N*N*N/2, half_size, cudaMemcpyHostToDevice);
// Enable peer access
cudaSetDevice(0);
cudaDeviceEnablePeerAccess(1,0);
cudaSetDevice(1);
cudaDeviceEnablePeerAccess(0,0);
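// With peer access enabled, each kernel dereferences the other GPU's uOld
// buffer directly at the domain split (d1_uOld inside jacobi_v3_dv0, d_uOld
// inside jacobi_v3_dv1), so no explicit halo copy through the host is needed
// between iterations.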
// kernel settings
dim3 blocksize(8,8,8);
dim3 gridsize( ceil((int) N/blocksize.x),ceil((int) N/blocksize.y),ceil((int) N/blocksize.z) );
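// Note: N/blocksize.x is integer division, so the ceil() calls have no
// rounding-up effect; the grid covers the full domain only when N is a
// multiple of the 8-point block edge.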
// Jacobi max iterations loop in host
double frac = 1.0/6.0;
double delta2 = (2.0*2.0)/N2;
int it = 0;
double ts = omp_get_wtime();
while(it < iter_max){
swap(&d_uOld, &d_u);
swap(&d1_uOld, &d1_u);
cudaSetDevice(0);
jacobi_v3_dv0<<<gridsize,blocksize>>>(d_u, d_uOld, d1_uOld, d_f, frac, delta2, N, N2);
cudaSetDevice(1);
jacobi_v3_dv1<<<gridsize,blocksize>>>(d1_u, d1_uOld, d_uOld, d1_f, frac, delta2, N, N2);
cudaDeviceSynchronize();
cudaSetDevice(0);
cudaDeviceSynchronize();
it++;
}
double te = omp_get_wtime() - ts;
// Copy back to host
cudaSetDevice(0);
cudaMemcpy(h_u, d_u, half_size, cudaMemcpyDeviceToHost);
cudaSetDevice(1);
cudaMemcpy(h_u + N*N*N/2, d1_u, half_size, cudaMemcpyDeviceToHost);
// Disable Peer Access
cudaSetDevice(0);
cudaDeviceDisablePeerAccess(1);
cudaSetDevice(1);
cudaDeviceDisablePeerAccess(0);
// dump results if wanted
switch(output_type) {
case 0:
// no output at all
break;
case 4:
output_ext = ".vtk";
sprintf(output_filename, "%s_%d%s", output_prefix, N, output_ext);
//fprintf(stderr, "Write VTK file to %s: ", output_filename);
print_vtk(output_filename, N, h_u);
break;
default:
fprintf(stderr, "Non-supported output type!\n");
break;
}
// Calculate effective bandwidth
double efBW = N*N*N*sizeof(double)*4*it/te/1e3;
// 4 -> read uold, f | read and write u
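// Illustrative (assumed numbers): N = 256, it = 1000, te = 2.0 s gives
// 256^3 * 8 B * 4 * 1000 / 2.0 / 1e3 ~= 2.7e8 in these units, i.e. roughly 268 GB/s.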
// Calculate it/s
double itpersec = it/te;
int kbytes = N*N*N*sizeof(double)*3/1000;
//print info
printf("%d %d %3.6f %3.6f %3.6f %3.6f\n", N, it, te, itpersec, kbytes, efBW);
//Free host and device memory
cudaFreeHost(h_f);
cudaFreeHost(h_u);
cudaFreeHost(h_uOld);
cudaFree(d_f);
cudaFree(d_u);
cudaFree(d_uOld);
cudaFree(d1_f);
cudaFree(d1_u);
cudaFree(d1_uOld);
return(0);
}
|
5e1c74e08c9fa88325e20eb35b9e79c1ae3c87c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
#include "ParticleData.cuh"
#include "ParticleGroup.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hipcub/hipcub.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#pragma GCC diagnostic pop
/*! \file ParticleGroup.cu
\brief Contains GPU kernel code used by ParticleGroup
*/
//! GPU kernel to translate between global and local membership lookup table
__global__ void gpu_rebuild_index_list_kernel(unsigned int N,
unsigned int* d_tag,
unsigned int* d_is_member_tag,
unsigned int* d_is_member)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = d_tag[idx];
d_is_member[idx] = d_is_member_tag[tag];
}
__global__ void gpu_scatter_member_indices(unsigned int N,
const unsigned int* d_scan,
const unsigned int* d_is_member,
unsigned* d_member_idx)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
if (d_is_member[idx])
d_member_idx[d_scan[idx]] = idx;
}
//! GPU method for rebuilding the index list of a ParticleGroup
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
hipError_t gpu_rebuild_index_list(unsigned int N,
unsigned int* d_is_member_tag,
unsigned int* d_is_member,
unsigned int* d_tag)
{
assert(d_is_member);
assert(d_is_member_tag);
assert(d_tag);
unsigned int block_size = 256;
unsigned int n_blocks = N / block_size + 1;
hipLaunchKernelGGL(gpu_rebuild_index_list_kernel,
dim3(n_blocks),
dim3(block_size),
0,
0,
N,
d_tag,
d_is_member_tag,
d_is_member);
return hipSuccess;
}
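/* Worked example of the scan + scatter compaction performed by gpu_compact_index_list below
(values are illustrative): with d_is_member = {1, 0, 1, 1, 0}, the exclusive prefix sum
gives d_scan = {0, 1, 1, 2, 3}; gpu_scatter_member_indices then writes
d_member_idx = {0, 2, 3}, and the reduction of d_is_member gives num_local_members = 3. */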
//! GPU method for compacting the group member indices
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
hipError_t gpu_compact_index_list(unsigned int N,
unsigned int* d_is_member,
unsigned int* d_member_idx,
unsigned int& num_local_members,
unsigned int* d_tmp,
CachedAllocator& alloc)
{
assert(d_is_member);
assert(d_member_idx);
// compute member_idx offsets
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
// determine size of temporary storage
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_is_member, d_tmp, N);
d_temp_storage = alloc.getTemporaryBuffer<char>(temp_storage_bytes);
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_is_member, d_tmp, N);
alloc.deallocate((char*)d_temp_storage);
thrust::device_ptr<unsigned int> is_member(d_is_member);
#ifdef __HIP_PLATFORM_HCC__
num_local_members = thrust::reduce(thrust::hip::par(alloc),
#else
num_local_members = thrust::reduce(thrust::hip::par(alloc),
#endif
is_member,
is_member + N);
// fill member_idx array
unsigned int block_size = 256;
unsigned int n_blocks = N / block_size + 1;
hipLaunchKernelGGL(gpu_scatter_member_indices,
dim3(n_blocks),
dim3(block_size),
0,
0,
N,
d_tmp,
d_is_member,
d_member_idx);
return hipSuccess;
}
| 5e1c74e08c9fa88325e20eb35b9e79c1ae3c87c4.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: jglaser
#include "ParticleData.cuh"
#include "ParticleGroup.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hipcub/hipcub.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#pragma GCC diagnostic pop
/*! \file ParticleGroup.cu
\brief Contains GPU kernel code used by ParticleGroup
*/
//! GPU kernel to translate between global and local membership lookup table
__global__ void gpu_rebuild_index_list_kernel(unsigned int N,
unsigned int* d_tag,
unsigned int* d_is_member_tag,
unsigned int* d_is_member)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = d_tag[idx];
d_is_member[idx] = d_is_member_tag[tag];
}
__global__ void gpu_scatter_member_indices(unsigned int N,
const unsigned int* d_scan,
const unsigned int* d_is_member,
unsigned* d_member_idx)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
if (d_is_member[idx])
d_member_idx[d_scan[idx]] = idx;
}
//! GPU method for rebuilding the index list of a ParticleGroup
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
hipError_t gpu_rebuild_index_list(unsigned int N,
unsigned int* d_is_member_tag,
unsigned int* d_is_member,
unsigned int* d_tag)
{
assert(d_is_member);
assert(d_is_member_tag);
assert(d_tag);
unsigned int block_size = 256;
unsigned int n_blocks = N / block_size + 1;
hipLaunchKernelGGL(gpu_rebuild_index_list_kernel,
dim3(n_blocks),
dim3(block_size),
0,
0,
N,
d_tag,
d_is_member_tag,
d_is_member);
return hipSuccess;
}
//! GPU method for compacting the group member indices
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
hipError_t gpu_compact_index_list(unsigned int N,
unsigned int* d_is_member,
unsigned int* d_member_idx,
unsigned int& num_local_members,
unsigned int* d_tmp,
CachedAllocator& alloc)
{
assert(d_is_member);
assert(d_member_idx);
// compute member_idx offsets
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
// determine size of temporary storage
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_is_member, d_tmp, N);
d_temp_storage = alloc.getTemporaryBuffer<char>(temp_storage_bytes);
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_is_member, d_tmp, N);
alloc.deallocate((char*)d_temp_storage);
thrust::device_ptr<unsigned int> is_member(d_is_member);
#ifdef __HIP_PLATFORM_HCC__
num_local_members = thrust::reduce(thrust::hip::par(alloc),
#else
num_local_members = thrust::reduce(thrust::cuda::par(alloc),
#endif
is_member,
is_member + N);
// fill member_idx array
unsigned int block_size = 256;
unsigned int n_blocks = N / block_size + 1;
hipLaunchKernelGGL(gpu_scatter_member_indices,
dim3(n_blocks),
dim3(block_size),
0,
0,
N,
d_tmp,
d_is_member,
d_member_idx);
return hipSuccess;
}
|
e01ad90fe8e57f7e03d2c0c6023c15a47c2c1273.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void vector_add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
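/* Rough sizing note (illustrative): with N = 2048*2048 each int array is N * 4 B = 16 MiB,
so the three device arrays take about 48 MiB (plus the same again on the host); N is
ultimately limited by the available device memory. */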
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
/* fix the parameters needed to copy data to the device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
hipLaunchKernelGGL(( vector_add), dim3((N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0, d_a, d_b, d_c );
/* copy result back to host */
/* fix the parameters needed to copy data back to the host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| e01ad90fe8e57f7e03d2c0c6023c15a47c2c1273.cu | #include <stdio.h>
__global__ void vector_add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
/* fix the parameters needed to copy data to the device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
vector_add<<< (N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, d_b, d_c );
/* copy result back to host */
/* fix the parameters needed to copy data back to the host */
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
|
0b57ad394f38f9db563c719a0dc789f425e605d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for the cuSPARSE implementation of the
* SUNMATRIX module.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sunmemory/sunmemory_cuda.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include "sundials_cuda.h"
#include "sundials_debug.h"
#include "cusparse_kernels.cuh"
/* Use the namespace for the kernels */
using namespace sundials::sunmatrix_cusparse;
/* Constants */
#define ZERO RCONST(0.0)
#define ONE RCONST(1.0)
/* Private function prototypes */
static booleantype SMCompatible_cuSparse(SUNMatrix, SUNMatrix);
static SUNMatrix SUNMatrix_cuSparse_NewEmpty();
#if CUDART_VERSION >= 11000
static hipsparseStatus_t CreateSpMatDescr(SUNMatrix, hipsparseSpMatDescr_t*);
#endif
/* Macros for handling the different function names based on precision */
#if defined(SUNDIALS_DOUBLE_PRECISION)
#define cusparseXcsrmv hipsparseDcsrmv
#define CUDA_R_XF HIP_R_64F
#elif defined(SUNDIALS_SINGLE_PRECISION)
#define cusparseXcsrmv hipsparseScsrmv
#define CUDA_R_XF HIP_R_32F
#endif
/* Content accessor macros */
#define SMCU_CONTENT(A) ( (SUNMatrix_Content_cuSparse)(A->content) )
#define SMCU_ROWS(A) ( SMCU_CONTENT(A)->M )
#define SMCU_COLUMNS(A) ( SMCU_CONTENT(A)->N )
#define SMCU_NNZ(A) ( SMCU_CONTENT(A)->NNZ )
#define SMCU_NBLOCKS(A) ( SMCU_CONTENT(A)->nblocks )
#define SMCU_BLOCKROWS(A) ( SMCU_CONTENT(A)->blockrows )
#define SMCU_BLOCKCOLS(A) ( SMCU_CONTENT(A)->blockcols )
#define SMCU_BLOCKNNZ(A) ( SMCU_CONTENT(A)->blocknnz )
#define SMCU_NP(A) ( SMCU_CONTENT(A)->NP )
#define SMCU_SPARSETYPE(A) ( SMCU_CONTENT(A)->sparse_type )
#define SMCU_OWNMATD(A) ( SMCU_CONTENT(A)->own_matd )
#define SMCU_OWNEXEC(A) ( SMCU_CONTENT(A)->own_exec )
#define SMCU_DATA(A) ( SMCU_CONTENT(A)->data )
#define SMCU_DATAp(A) ( (realtype*)SMCU_CONTENT(A)->data->ptr )
#define SMCU_INDEXVALS(A) ( SMCU_CONTENT(A)->colind )
#define SMCU_INDEXPTRS(A) ( SMCU_CONTENT(A)->rowptrs )
#define SMCU_INDEXVALSp(A) ( (int*) SMCU_CONTENT(A)->colind->ptr )
#define SMCU_INDEXPTRSp(A) ( (int*) SMCU_CONTENT(A)->rowptrs->ptr )
#define SMCU_MEMHELP(A) ( SMCU_CONTENT(A)->mem_helper )
#define SMCU_MATDESCR(A) ( SMCU_CONTENT(A)->mat_descr )
#define SMCU_CUSPHANDLE(A) ( SMCU_CONTENT(A)->cusp_handle )
#define SMCU_FIXEDPATTERN(A)( SMCU_CONTENT(A)->fixed_pattern )
#define SMCU_EXECPOLICY(A) ( SMCU_CONTENT(A)->exec_policy )
/* ------------------------------------------------------------------
* Default execution policy definition.
*
* This policy tries to help us leverage the structure of the matrix.
* It will choose block sizes which are a multiple of the warp size,
* and it will choose a grid size such that all work elements are
* covered.
* ------------------------------------------------------------------ */
class SUNCuSparseMatrixExecPolicy : public SUNCudaExecPolicy
{
public:
SUNCuSparseMatrixExecPolicy(const hipStream_t stream = 0)
: stream_(stream)
{}
SUNCuSparseMatrixExecPolicy(const SUNCuSparseMatrixExecPolicy& ex)
: stream_(ex.stream_)
{}
virtual size_t gridSize(size_t numWorkElements, size_t blockDim = 0) const
{
return(numWorkElements + blockDim - 1)/blockDim;
}
virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const
{
return(max_block_size(CUDA_WARP_SIZE*(numWorkElements + CUDA_WARP_SIZE - 1)/CUDA_WARP_SIZE));
}
virtual const hipStream_t* stream() const
{
return(&stream_);
}
virtual CudaExecPolicy* clone() const
{
return(static_cast<CudaExecPolicy*>(new SUNCuSparseMatrixExecPolicy(*this)));
}
static size_t max_block_size(int val)
{
return((val > MAX_CUDA_BLOCKSIZE) ? MAX_CUDA_BLOCKSIZE : val );
}
private:
const hipStream_t stream_;
};
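/* Worked example of the default policy (assuming CUDA_WARP_SIZE == 32 and
MAX_CUDA_BLOCKSIZE >= 1024): blockSize(1000) rounds 1000 up to the next multiple of the
warp size, 32 * ((1000 + 31) / 32) = 1024, and gridSize(1000, 1024) then gives
(1000 + 1024 - 1) / 1024 = 1 block. */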
/* ------------------------------------------------------------------
* Constructors.
* ------------------------------------------------------------------ */
SUNMatrix SUNMatrix_cuSparse_NewCSR(int M, int N, int NNZ, hipsparseHandle_t cusp)
{
SUNMemory d_colind, d_rowptr, d_values;
int alloc_fail = 0;
/* return with NULL matrix on illegal input */
if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: illegal value(s) for M, N, or NNZ\n");
return(NULL);
}
SUNMatrix A = SUNMatrix_cuSparse_NewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
return(NULL);
}
SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda();
if (SMCU_MEMHELP(A) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
SUNMatDestroy(A);
return(NULL);
}
/* Allocate device memory for the matrix */
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_colind,
sizeof(int)*NNZ, SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_rowptr,
sizeof(int)*(M+1), SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_values,
sizeof(realtype)*NNZ, SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
/* Choose sensible defaults */
hipsparseStatus_t cusparse_status = HIPSPARSE_STATUS_SUCCESS;
hipsparseMatDescr_t mat_descr;
cusparse_status = hipsparseCreateMatDescr(&mat_descr);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = hipsparseSetMatType(mat_descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
hipsparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = hipsparseSetMatIndexBase(mat_descr, HIPSPARSE_INDEX_BASE_ZERO);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
hipsparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
hipStream_t stream;
if (!SUNDIALS_CUSPARSE_VERIFY(hipsparseGetStream(cusp, &stream)))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
hipsparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
/* Fill the content */
SMCU_CONTENT(A)->M = M;
SMCU_CONTENT(A)->N = N;
SMCU_CONTENT(A)->NNZ = NNZ;
SMCU_CONTENT(A)->nblocks = 1;
SMCU_CONTENT(A)->blockrows = M;
SMCU_CONTENT(A)->blockcols = N;
SMCU_CONTENT(A)->blocknnz = NNZ;
SMCU_CONTENT(A)->own_matd = SUNTRUE;
SMCU_CONTENT(A)->own_exec = SUNTRUE;
SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_CSR;
SMCU_CONTENT(A)->colind = d_colind;
SMCU_CONTENT(A)->rowptrs = d_rowptr;
SMCU_CONTENT(A)->data = d_values;
SMCU_CONTENT(A)->mat_descr = mat_descr;
SMCU_CONTENT(A)->cusp_handle = cusp;
SMCU_CONTENT(A)->exec_policy = new SUNCuSparseMatrixExecPolicy(stream);
#if CUDART_VERSION >= 11000
hipsparseSpMatDescr_t spmat_descr;
if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
{
SUNMatDestroy(A);
return(NULL);
}
SMCU_CONTENT(A)->spmat_descr = spmat_descr;
SMCU_CONTENT(A)->dBufferMem = NULL;
SMCU_CONTENT(A)->bufferSize = 0;
SMCU_CONTENT(A)->vecX = NULL;
SMCU_CONTENT(A)->vecY = NULL;
#endif
return A;
}
SUNMatrix SUNMatrix_cuSparse_MakeCSR(hipsparseMatDescr_t mat_descr, int M, int N, int NNZ,
int *rowptrs , int *colind , realtype *data,
hipsparseHandle_t cusp)
{
/* return with NULL matrix on illegal input */
if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: illegal value(s) for M, N, or NNZ\n");
return(NULL);
}
if ( (rowptrs == NULL) || (colind == NULL) || (data == NULL) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: rowptrs, colind, or data is NULL\n");
return(NULL);
}
if (cusparseGetMatIndexBase(mat_descr) != HIPSPARSE_INDEX_BASE_ZERO)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: the hipsparseMatDescr_t must have index base HIPSPARSE_INDEX_BASE_ZERO\n");
return(NULL);
}
SUNMatrix A = SUNMatrix_cuSparse_NewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
return(NULL);
}
SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda();
if (SMCU_MEMHELP(A) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
SUNMatDestroy(A);
return(NULL);
}
hipStream_t stream;
if (!SUNDIALS_CUSPARSE_VERIFY(hipsparseGetStream(cusp, &stream)))
{
SUNMatDestroy(A);
return(NULL);
}
/* Fill content */
SMCU_CONTENT(A)->M = M;
SMCU_CONTENT(A)->N = N;
SMCU_CONTENT(A)->NNZ = NNZ;
SMCU_CONTENT(A)->nblocks = 1;
SMCU_CONTENT(A)->blockrows = M;
SMCU_CONTENT(A)->blockcols = N;
SMCU_CONTENT(A)->blocknnz = NNZ;
SMCU_CONTENT(A)->own_matd = SUNFALSE;
SMCU_CONTENT(A)->own_exec = SUNTRUE;
SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_CSR;
SMCU_CONTENT(A)->colind = SUNMemoryHelper_Wrap(colind, SUNMEMTYPE_DEVICE);
SMCU_CONTENT(A)->rowptrs = SUNMemoryHelper_Wrap(rowptrs, SUNMEMTYPE_DEVICE);
SMCU_CONTENT(A)->data = SUNMemoryHelper_Wrap(data, SUNMEMTYPE_DEVICE);
SMCU_CONTENT(A)->mat_descr = mat_descr;
SMCU_CONTENT(A)->cusp_handle = cusp;
SMCU_CONTENT(A)->exec_policy = new SUNCuSparseMatrixExecPolicy(stream);
if (SMCU_CONTENT(A)->colind == NULL ||
SMCU_CONTENT(A)->rowptrs == NULL ||
SMCU_CONTENT(A)->data == NULL)
{
SUNMatDestroy(A);
return(NULL);
}
#if CUDART_VERSION >= 11000
hipsparseSpMatDescr_t spmat_descr;
if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
{
SUNMatDestroy(A);
return(NULL);
}
SMCU_CONTENT(A)->spmat_descr = spmat_descr;
SMCU_CONTENT(A)->dBufferMem = NULL;
SMCU_CONTENT(A)->bufferSize = 0;
SMCU_CONTENT(A)->vecX = NULL;
SMCU_CONTENT(A)->vecY = NULL;
#endif
return(A);
}
SUNMatrix SUNMatrix_cuSparse_NewBlockCSR(int nblocks, int blockrows, int blockcols, int blocknnz, hipsparseHandle_t cusp)
{
SUNMemory d_colind, d_rowptr, d_values;
int M, N, NNZ;
int alloc_fail = 0;
/* Return with NULL matrix on illegal input */
if (blockrows != blockcols)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: matrix must be square for the BCSR format\n");
return(NULL);
}
M = nblocks * blockrows;
N = M;
NNZ = nblocks * blocknnz;
/* Return with NULL matrix on illegal input */
if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: illegal value(s) for M, N, or NNZ\n");
return(NULL);
}
/* Allocate the SUNMatrix object */
SUNMatrix A = SUNMatrix_cuSparse_NewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
return(NULL);
}
SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda();
if (SMCU_MEMHELP(A) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
SUNMatDestroy(A);
return(NULL);
}
/* Allocate device memory for the matrix */
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_colind,
sizeof(int)*blocknnz, SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_rowptr,
sizeof(int)*(blockrows + 1),
SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_values,
sizeof(realtype)*blocknnz*nblocks,
SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
/* Choose sensible defaults */
hipsparseStatus_t cusparse_status = HIPSPARSE_STATUS_SUCCESS;
hipsparseMatDescr_t mat_descr;
cusparse_status = hipsparseCreateMatDescr(&mat_descr);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = hipsparseSetMatType(mat_descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
hipsparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = hipsparseSetMatIndexBase(mat_descr, HIPSPARSE_INDEX_BASE_ZERO);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
hipsparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
hipStream_t stream;
if (!SUNDIALS_CUSPARSE_VERIFY(hipsparseGetStream(cusp, &stream)))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
hipsparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
/* Fill the content */
SMCU_CONTENT(A)->M = M;
SMCU_CONTENT(A)->N = N;
SMCU_CONTENT(A)->NNZ = NNZ;
SMCU_CONTENT(A)->nblocks = nblocks;
SMCU_CONTENT(A)->blockrows = blockrows;
SMCU_CONTENT(A)->blockcols = blockrows;
SMCU_CONTENT(A)->blocknnz = blocknnz;
SMCU_CONTENT(A)->own_matd = SUNTRUE;
SMCU_CONTENT(A)->own_exec = SUNTRUE;
SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
SMCU_CONTENT(A)->cusp_handle = cusp;
SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_BCSR;
SMCU_CONTENT(A)->colind = d_colind;
SMCU_CONTENT(A)->rowptrs = d_rowptr;
SMCU_CONTENT(A)->data = d_values;
SMCU_CONTENT(A)->mat_descr = mat_descr;
SMCU_CONTENT(A)->exec_policy = new SUNCuSparseMatrixExecPolicy(stream);
#if CUDART_VERSION >= 11000
hipsparseSpMatDescr_t spmat_descr;
if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
{
SUNMatDestroy(A);
return(NULL);
}
SMCU_CONTENT(A)->spmat_descr = spmat_descr;
SMCU_CONTENT(A)->dBufferMem = NULL;
SMCU_CONTENT(A)->bufferSize = 0;
SMCU_CONTENT(A)->vecX = NULL;
SMCU_CONTENT(A)->vecY = NULL;
#endif
return(A);
}
/* ------------------------------------------------------------------
* Implementation specific routines.
* ------------------------------------------------------------------ */
int SUNMatrix_cuSparse_SparseType(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_SPARSETYPE(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_Rows(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_ROWS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_Columns(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_COLUMNS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_NNZ(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_NNZ(A));
else
return(SUNMAT_ILL_INPUT);
}
int* SUNMatrix_cuSparse_IndexPointers(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_INDEXPTRSp(A));
else
return(NULL);
}
int* SUNMatrix_cuSparse_IndexValues(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_INDEXVALSp(A));
else
return(NULL);
}
realtype* SUNMatrix_cuSparse_Data(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_DATAp(A));
else
return(NULL);
}
int SUNMatrix_cuSparse_NumBlocks(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_NBLOCKS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_BlockRows(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_BLOCKROWS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_BlockColumns(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_BLOCKCOLS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_BlockNNZ(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_BLOCKNNZ(A));
else
return(SUNMAT_ILL_INPUT);
}
realtype* SUNMatrix_cuSparse_BlockData(SUNMatrix A, int blockidx)
{
realtype *matdata;
int offset;
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE)
return(NULL);
if (blockidx >= SMCU_NBLOCKS(A))
return(NULL);
matdata = SMCU_DATAp(A);
offset = SMCU_BLOCKNNZ(A)*blockidx;
return(&matdata[offset]);
}
hipsparseMatDescr_t SUNMatrix_cuSparse_MatDescr(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_MATDESCR(A));
else
return(NULL);
}
int SUNMatrix_cuSparse_SetFixedPattern(SUNMatrix A, booleantype yesno)
{
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE)
return(SUNMAT_ILL_INPUT);
SMCU_FIXEDPATTERN(A) = yesno;
return(SUNMAT_SUCCESS);
}
int SUNMatrix_cuSparse_SetKernelExecPolicy(SUNMatrix A, SUNCudaExecPolicy* exec_policy)
{
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE || exec_policy == NULL)
return(SUNMAT_ILL_INPUT);
if (SMCU_OWNEXEC(A)) delete SMCU_EXECPOLICY(A);
SMCU_EXECPOLICY(A) = exec_policy;
SMCU_OWNEXEC(A) = SUNFALSE;
return(SUNMAT_SUCCESS);
}
int SUNMatrix_cuSparse_CopyToDevice(SUNMatrix dA, realtype* h_data,
int* h_idxptrs, int* h_idxvals)
{
int retval;
SUNMemory _h_data, _h_idxptrs, _h_idxvals;
const hipStream_t* stream;
int nidxvals, nidxptrs;
if (SUNMatGetID(dA) != SUNMATRIX_CUSPARSE)
return(SUNMAT_ILL_INPUT);
stream = SMCU_EXECPOLICY(dA)->stream();
if (h_data != NULL)
{
_h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
SMCU_DATA(dA),
_h_data,
SMCU_NNZ(dA)*sizeof(realtype),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_data);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
switch(SMCU_SPARSETYPE(dA))
{
case SUNMAT_CUSPARSE_CSR:
nidxptrs = SMCU_ROWS(dA)+1;
nidxvals = SMCU_NNZ(dA);
break;
case SUNMAT_CUSPARSE_BCSR:
nidxptrs = SMCU_BLOCKROWS(dA)+1;
nidxvals = SMCU_BLOCKNNZ(dA);
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_CopyToDevice: unrecognized sparse type\n");
return(SUNMAT_ILL_INPUT);
}
if (h_idxptrs != NULL)
{
_h_idxptrs = SUNMemoryHelper_Wrap(h_idxptrs, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
SMCU_INDEXPTRS(dA),
_h_idxptrs,
nidxptrs*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxptrs);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
if (h_idxvals != NULL)
{
_h_idxvals = SUNMemoryHelper_Wrap(h_idxvals, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
SMCU_INDEXVALS(dA),
_h_idxvals,
nidxvals*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxvals);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
return(SUNMAT_SUCCESS);
}
int SUNMatrix_cuSparse_CopyFromDevice(SUNMatrix dA, realtype* h_data,
int* h_idxptrs, int* h_idxvals)
{
int retval;
SUNMemory _h_data, _h_idxptrs, _h_idxvals;
const hipStream_t* stream;
int nidxvals, nidxptrs;
if (SUNMatGetID(dA) != SUNMATRIX_CUSPARSE)
return(SUNMAT_ILL_INPUT);
stream = SMCU_EXECPOLICY(dA)->stream();
if (h_data != NULL)
{
_h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
_h_data,
SMCU_DATA(dA),
SMCU_NNZ(dA)*sizeof(realtype),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_data);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
switch(SMCU_SPARSETYPE(dA))
{
case SUNMAT_CUSPARSE_CSR:
nidxptrs = SMCU_ROWS(dA)+1;
nidxvals = SMCU_NNZ(dA);
break;
case SUNMAT_CUSPARSE_BCSR:
nidxptrs = SMCU_BLOCKROWS(dA)+1;
nidxvals = SMCU_BLOCKNNZ(dA);
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_CopyFromDevice: unrecognized sparse type\n");
return(SUNMAT_ILL_INPUT);
}
if (h_idxptrs != NULL)
{
_h_idxptrs = SUNMemoryHelper_Wrap(h_idxptrs, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
_h_idxptrs,
SMCU_INDEXPTRS(dA),
nidxptrs*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxptrs);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
if (h_idxvals != NULL)
{
_h_idxvals = SUNMemoryHelper_Wrap(h_idxvals, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
_h_idxvals,
SMCU_INDEXVALS(dA),
nidxvals*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxvals);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
return(SUNMAT_SUCCESS);
}
/*
* -----------------------------------------------------------------
* implementation of matrix operations
* -----------------------------------------------------------------
*/
SUNMatrix_ID SUNMatGetID_cuSparse(SUNMatrix A)
{
return(SUNMATRIX_CUSPARSE);
}
/* Returns a new matrix allocated to have the same structure as A,
but it does not copy any nonzeros, column vals, or row pointers. */
SUNMatrix SUNMatClone_cuSparse(SUNMatrix A)
{
SUNMatrix B;
switch (SMCU_SPARSETYPE(A))
{
case SUNMAT_CUSPARSE_CSR:
B = SUNMatrix_cuSparse_NewCSR(SMCU_ROWS(A), SMCU_COLUMNS(A), SMCU_NNZ(A),
SMCU_CUSPHANDLE(A));
break;
case SUNMAT_CUSPARSE_BCSR:
B = SUNMatrix_cuSparse_NewBlockCSR(SMCU_NBLOCKS(A), SMCU_BLOCKROWS(A), SMCU_BLOCKCOLS(A),
SMCU_BLOCKNNZ(A), SMCU_CUSPHANDLE(A));
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatClone_cuSparse: sparse type not recognized\n");
B = NULL;
}
SMCU_FIXEDPATTERN(B) = SMCU_FIXEDPATTERN(A);
delete SMCU_EXECPOLICY(B);
SMCU_EXECPOLICY(B) = SMCU_EXECPOLICY(A)->clone();
return(B);
}
/* Deallocates the SUNMatrix object and all data it owns */
void SUNMatDestroy_cuSparse(SUNMatrix A)
{
if (A == NULL) return;
/* free content */
if (A->content != NULL)
{
if (SMCU_MEMHELP(A))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_DATA(A));
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_INDEXPTRS(A));
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_INDEXVALS(A));
}
else
{
SUNDIALS_DEBUG_PRINT("WARNING in SUNMatDestroy_cuSparse: mem_helper was NULL when trying to dealloc data, this could result in a memory leak\n");
}
if (SMCU_OWNMATD(A))
{
/* free hipsparseMatDescr_t */
SUNDIALS_CUSPARSE_VERIFY( hipsparseDestroyMatDescr(SMCU_MATDESCR(A)) );
}
#if CUDART_VERSION >= 11000
SUNDIALS_CUSPARSE_VERIFY( hipsparseDestroyDnVec(SMCU_CONTENT(A)->vecX) );
SUNDIALS_CUSPARSE_VERIFY( hipsparseDestroyDnVec(SMCU_CONTENT(A)->vecY) );
SUNDIALS_CUSPARSE_VERIFY( hipsparseDestroySpMat(SMCU_CONTENT(A)->spmat_descr) );
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_CONTENT(A)->dBufferMem);
#endif
if (SMCU_EXECPOLICY(A) && SMCU_OWNEXEC(A))
{
delete SMCU_EXECPOLICY(A);
SMCU_EXECPOLICY(A) = NULL;
}
SUNMemoryHelper_Destroy(SMCU_MEMHELP(A));
/* free content struct */
free(A->content);
A->content = NULL;
}
/* free ops and matrix */
if (A->ops) { free(A->ops); A->ops = NULL; }
free(A); A = NULL;
return;
}
/* Performs A_ij = 0 */
int SUNMatZero_cuSparse(SUNMatrix A)
{
hipError_t cuerr;
hipStream_t stream;
stream = *SMCU_EXECPOLICY(A)->stream();
/* set all data to zero */
cuerr = hipMemsetAsync(SMCU_DATAp(A), 0, SMCU_NNZ(A)*sizeof(realtype), stream);
if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);
/* set all rowptrs to zero unless the sparsity pattern is fixed */
if (!SMCU_FIXEDPATTERN(A))
{
cuerr = hipMemsetAsync(SMCU_INDEXPTRSp(A), 0,
(SMCU_BLOCKROWS(A)+1)*sizeof(int),
stream);
if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);
/* set all colind to zero */
cuerr = hipMemsetAsync(SMCU_INDEXVALSp(A), 0,
SMCU_BLOCKNNZ(A)*sizeof(int),
stream);
if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);
}
return(SUNMAT_SUCCESS);
}
/* Copies the nonzeros, column vals, and row pointers into dst */
int SUNMatCopy_cuSparse(SUNMatrix src, SUNMatrix dst)
{
int retval;
const hipStream_t* stream;
/* Verify that src and dst are compatible */
if (!SMCompatible_cuSparse(src, dst))
return(SUNMAT_ILL_INPUT);
stream = SMCU_EXECPOLICY(src)->stream();
/* Ensure that dst is allocated with at least as
much memory as we have nonzeros in src */
if (SMCU_NNZ(dst) < SMCU_NNZ(src))
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatCopy_cuSparse: the destination matrix has less nonzeros than the source\n");
return(SUNMAT_ILL_INPUT);
}
/* Zero out dst so that copy works correctly */
if (SUNMatZero_cuSparse(dst) != SUNMAT_SUCCESS)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatCopy_cuSparse: SUNMatZero_cuSparse failed\n");
return(SUNMAT_OPERATION_FAIL);
}
/* Copy the data over */
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src),
SMCU_DATA(dst),
SMCU_DATA(src),
SMCU_NNZ(src)*sizeof(realtype),
(void*) stream);
if (retval) return(SUNMAT_OPERATION_FAIL);
/* Copy the row pointers over */
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src),
SMCU_INDEXPTRS(dst),
SMCU_INDEXPTRS(src),
(SMCU_BLOCKROWS(src)+1)*sizeof(int),
(void*) stream);
if (retval) return(SUNMAT_OPERATION_FAIL);
/* Copy the column indices over */
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src),
SMCU_INDEXVALS(dst),
SMCU_INDEXVALS(src),
SMCU_BLOCKNNZ(src)*sizeof(int),
(void*) stream);
if (retval) return(SUNMAT_OPERATION_FAIL);
return(SUNMAT_SUCCESS);
}
/* Performs A = cA + I. Requires the diagonal to be allocated already. */
int SUNMatScaleAddI_cuSparse(realtype c, SUNMatrix A)
{
unsigned threadsPerBlock, gridSize;
hipStream_t stream = *SMCU_EXECPOLICY(A)->stream();
switch (SMCU_SPARSETYPE(A))
{
case SUNMAT_CUSPARSE_CSR:
/* Choose the grid size to be the number of rows in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per 2 columns. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
hipLaunchKernelGGL(( scaleAddIKernelCSR<realtype, int>)
, dim3(gridSize), dim3(threadsPerBlock), 0, stream, SMCU_ROWS(A),
c,
SMCU_DATAp(A),
SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A));
break;
case SUNMAT_CUSPARSE_BCSR:
/* Choose the grid size to be the number of blocks in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per row of the block. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_BLOCKROWS(A));
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_NBLOCKS(A)*SMCU_BLOCKROWS(A), threadsPerBlock);
hipLaunchKernelGGL(( scaleAddIKernelBCSR<realtype, int>)
, dim3(gridSize), dim3(threadsPerBlock), 0, stream, SMCU_BLOCKROWS(A),
SMCU_NBLOCKS(A),
SMCU_BLOCKNNZ(A),
c,
SMCU_DATAp(A),
SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A));
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAddI_cuSparse: sparse type not recognized\n");
return(SUNMAT_ILL_INPUT);
}
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
hipDeviceSynchronize();
if (!SUNDIALS_CUDA_VERIFY(hipGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif
return(SUNMAT_SUCCESS);
}
/* Performs A = cA + B */
int SUNMatScaleAdd_cuSparse(realtype c, SUNMatrix A, SUNMatrix B)
{
hipStream_t stream;
unsigned threadsPerBlock, gridSize;
if (!SMCompatible_cuSparse(A, B))
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAdd_cuSparse: SUNMatScaleAdd_cuSparse failed\n");
return(SUNMAT_ILL_INPUT);
}
stream = *SMCU_EXECPOLICY(A)->stream();
switch (SMCU_SPARSETYPE(A))
{
case SUNMAT_CUSPARSE_CSR:
/* Choose the grid size to be the number of rows in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per 2 columns. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
hipLaunchKernelGGL(( scaleAddKernelCSR<realtype, int>)
, dim3(gridSize), dim3(threadsPerBlock), 0, stream, SMCU_NNZ(A),
c,
SMCU_DATAp(A),
SMCU_DATAp(B));
break;
case SUNMAT_CUSPARSE_BCSR:
/* Choose the grid size to be the number of blocks in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per row of the block. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_BLOCKROWS(A));
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_NBLOCKS(A)*SMCU_BLOCKROWS(A), threadsPerBlock);
hipLaunchKernelGGL(( scaleAddKernelCSR<realtype, int>)
, dim3(gridSize), dim3(threadsPerBlock), 0, stream, SMCU_NNZ(A),
c,
SMCU_DATAp(A),
SMCU_DATAp(B));
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAdd_cuSparse: sparse type not recognized\n");
return(SUNMAT_ILL_INPUT);
}
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
hipDeviceSynchronize();
if (!SUNDIALS_CUDA_VERIFY(hipGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif
return(SUNMAT_SUCCESS);
}
/* Setup buffers needed for Matvec */
int SUNMatMatvecSetup_cuSparse(SUNMatrix A)
{
#if CUDART_VERSION >= 11000
realtype placeholder[1];
const realtype one = ONE;
/* Check if setup has already been done */
if (!(SMCU_CONTENT(A)->matvec_issetup))
{
SUNDIALS_CUSPARSE_VERIFY( hipsparseCreateDnVec(&SMCU_CONTENT(A)->vecX,
SMCU_COLUMNS(A),
placeholder, CUDA_R_XF) );
SUNDIALS_CUSPARSE_VERIFY( hipsparseCreateDnVec(&SMCU_CONTENT(A)->vecY,
SMCU_ROWS(A),
placeholder, CUDA_R_XF) );
SUNDIALS_CUSPARSE_VERIFY(
hipsparseSpMV_bufferSize(SMCU_CUSPHANDLE(A),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&one, SMCU_CONTENT(A)->spmat_descr,
SMCU_CONTENT(A)->vecX, &one, SMCU_CONTENT(A)->vecY,
CUDA_R_XF, HIPSPARSE_MV_ALG_DEFAULT,
&SMCU_CONTENT(A)->bufferSize) );
if ( SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &SMCU_CONTENT(A)->dBufferMem,
SMCU_CONTENT(A)->bufferSize, SUNMEMTYPE_DEVICE) )
return(SUNMAT_OPERATION_FAIL);
}
#endif
SMCU_CONTENT(A)->matvec_issetup = SUNTRUE;
return(SUNMAT_SUCCESS);
}
/* Perform y = Ax */
int SUNMatMatvec_cuSparse(SUNMatrix A, N_Vector x, N_Vector y)
{
/* Verify that the dimensions of A, x, and y agree */
if ( (SMCU_COLUMNS(A) != N_VGetLength(x)) ||
(SMCU_ROWS(A) != N_VGetLength(y)) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatMatvec_cuSparse: dimensions do not agree\n");
return(SUNMAT_ILL_INPUT);
}
realtype *d_xdata = N_VGetDeviceArrayPointer(x);
realtype *d_ydata = N_VGetDeviceArrayPointer(y);
if (SMCU_SPARSETYPE(A) == SUNMAT_CUSPARSE_CSR)
{
const realtype one = ONE;
/* Zero result vector */
N_VConst(ZERO, y);
#if CUDART_VERSION >= 11000
{
/* Setup matvec if it has not been done yet */
if (!SMCU_CONTENT(A)->matvec_issetup && SUNMatMatvecSetup_cuSparse(A))
{
return(SUNMAT_OPERATION_FAIL);
}
SUNDIALS_CUSPARSE_VERIFY( hipsparseDnVecSetValues(SMCU_CONTENT(A)->vecX,
d_xdata) );
SUNDIALS_CUSPARSE_VERIFY( hipsparseDnVecSetValues(SMCU_CONTENT(A)->vecY,
d_ydata) );
SUNDIALS_CUSPARSE_VERIFY( hipsparseSpMV(SMCU_CUSPHANDLE(A),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&one, SMCU_CONTENT(A)->spmat_descr,
SMCU_CONTENT(A)->vecX, &one,
SMCU_CONTENT(A)->vecY, CUDA_R_XF,
HIPSPARSE_MV_ALG_DEFAULT,
SMCU_CONTENT(A)->dBufferMem->ptr) );
}
#else
SUNDIALS_CUSPARSE_VERIFY(
cusparseXcsrmv(SMCU_CUSPHANDLE(A), HIPSPARSE_OPERATION_NON_TRANSPOSE,
SMCU_ROWS(A), SMCU_COLUMNS(A), SMCU_NNZ(A),
&one, SMCU_MATDESCR(A), SMCU_DATAp(A), SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A), d_xdata, &one, d_ydata) );
#endif
}
else if (SMCU_SPARSETYPE(A) == SUNMAT_CUSPARSE_BCSR)
{
hipStream_t stream;
unsigned gridSize, threadsPerBlock;
stream = *SMCU_EXECPOLICY(A)->stream();
/* Choose the grid size to be the number of blocks in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per row of the block. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
hipLaunchKernelGGL(( matvecBCSR<realtype, int>)
, dim3(gridSize), dim3(threadsPerBlock), 0, stream, SMCU_BLOCKROWS(A),
SMCU_NBLOCKS(A),
SMCU_BLOCKNNZ(A),
SMCU_DATAp(A),
SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A),
d_xdata,
d_ydata);
}
else
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatMatvec_cuSparse: sparse type not recognized\n");
return(SUNMAT_ILL_INPUT);
}
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
hipDeviceSynchronize();
if (!SUNDIALS_CUDA_VERIFY(hipGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif
return(SUNMAT_SUCCESS);
}
/*
* =================================================================
* private functions
* =================================================================
*/
/* -----------------------------------------------------------------
* Function to check compatibility of two sparse SUNMatrix objects
*/
static booleantype SMCompatible_cuSparse(SUNMatrix A, SUNMatrix B)
{
/* both matrices must be sparse */
if ( (SUNMatGetID(A) != SUNMATRIX_CUSPARSE) ||
(SUNMatGetID(B) != SUNMATRIX_CUSPARSE) )
return(SUNFALSE);
/* both matrices must have the same shape and sparsity type */
if (SMCU_ROWS(A) != SMCU_ROWS(B))
return(SUNFALSE);
if (SMCU_COLUMNS(A) != SMCU_COLUMNS(B))
return(SUNFALSE);
if (SMCU_SPARSETYPE(A) != SMCU_SPARSETYPE(B))
return(SUNFALSE);
return(SUNTRUE);
}
/* -----------------------------------------------------------------
* Function to create empty SUNMatrix with ops attached and
* the content structure allocated.
*/
SUNMatrix SUNMatrix_cuSparse_NewEmpty()
{
/* Create an empty matrix object */
SUNMatrix A = NULL;
A = SUNMatNewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewEmpty: SUNMatNewEmpty failed\n");
return(NULL);
}
/* Attach operations */
A->ops->getid = SUNMatGetID_cuSparse;
A->ops->clone = SUNMatClone_cuSparse;
A->ops->destroy = SUNMatDestroy_cuSparse;
A->ops->zero = SUNMatZero_cuSparse;
A->ops->copy = SUNMatCopy_cuSparse;
A->ops->scaleadd = SUNMatScaleAdd_cuSparse;
A->ops->scaleaddi = SUNMatScaleAddI_cuSparse;
A->ops->matvecsetup = SUNMatMatvecSetup_cuSparse;
A->ops->matvec = SUNMatMatvec_cuSparse;
/* Create content */
SUNMatrix_Content_cuSparse content = NULL;
content = (SUNMatrix_Content_cuSparse) malloc(sizeof *content);
if (content == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewEmpty: failed to malloc content\n");
SUNMatDestroy(A);
return(NULL);
}
/* Attach content */
A->content = content;
content->mem_helper = NULL;
return(A);
}
#if CUDART_VERSION >= 11000
hipsparseStatus_t CreateSpMatDescr(SUNMatrix A, hipsparseSpMatDescr_t *spmat_descr)
{
/* CUDA 11 introduced the "Generic API" and removed the cusparseXcsrmv that
works on the old hipsparseMatDescr_t and raw data arrays. However,
cuSolverSp stuff requires the hipsparseMatDescr_t still. So, we have to
create this hipsparseSpMatDescr_t *and* the hipsparseMatDescr_t. */
return(hipsparseCreateCsr(spmat_descr, SMCU_ROWS(A), SMCU_COLUMNS(A),
SMCU_NNZ(A), SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A), SMCU_DATAp(A),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, CUDA_R_XF));
}
#endif
| 0b57ad394f38f9db563c719a0dc789f425e605d2.cu | /*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for the cuSPARSE implementation of the
* SUNMATRIX module.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sunmemory/sunmemory_cuda.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include "sundials_cuda.h"
#include "sundials_debug.h"
#include "cusparse_kernels.cuh"
/* Use the namespace for the kernels */
using namespace sundials::sunmatrix_cusparse;
/* Constants */
#define ZERO RCONST(0.0)
#define ONE RCONST(1.0)
/* Private function prototypes */
static booleantype SMCompatible_cuSparse(SUNMatrix, SUNMatrix);
static SUNMatrix SUNMatrix_cuSparse_NewEmpty();
#if CUDART_VERSION >= 11000
static cusparseStatus_t CreateSpMatDescr(SUNMatrix, cusparseSpMatDescr_t*);
#endif
/* Macros for handling the different function names based on precision */
#if defined(SUNDIALS_DOUBLE_PRECISION)
#define cusparseXcsrmv cusparseDcsrmv
#define CUDA_R_XF CUDA_R_64F
#elif defined(SUNDIALS_SINGLE_PRECISION)
#define cusparseXcsrmv cusparseScsrmv
#define CUDA_R_XF CUDA_R_32F
#endif
/* Content accessor macros */
#define SMCU_CONTENT(A) ( (SUNMatrix_Content_cuSparse)(A->content) )
#define SMCU_ROWS(A) ( SMCU_CONTENT(A)->M )
#define SMCU_COLUMNS(A) ( SMCU_CONTENT(A)->N )
#define SMCU_NNZ(A) ( SMCU_CONTENT(A)->NNZ )
#define SMCU_NBLOCKS(A) ( SMCU_CONTENT(A)->nblocks )
#define SMCU_BLOCKROWS(A) ( SMCU_CONTENT(A)->blockrows )
#define SMCU_BLOCKCOLS(A) ( SMCU_CONTENT(A)->blockcols )
#define SMCU_BLOCKNNZ(A) ( SMCU_CONTENT(A)->blocknnz )
#define SMCU_NP(A) ( SMCU_CONTENT(A)->NP )
#define SMCU_SPARSETYPE(A) ( SMCU_CONTENT(A)->sparse_type )
#define SMCU_OWNMATD(A) ( SMCU_CONTENT(A)->own_matd )
#define SMCU_OWNEXEC(A) ( SMCU_CONTENT(A)->own_exec )
#define SMCU_DATA(A) ( SMCU_CONTENT(A)->data )
#define SMCU_DATAp(A) ( (realtype*)SMCU_CONTENT(A)->data->ptr )
#define SMCU_INDEXVALS(A) ( SMCU_CONTENT(A)->colind )
#define SMCU_INDEXPTRS(A) ( SMCU_CONTENT(A)->rowptrs )
#define SMCU_INDEXVALSp(A) ( (int*) SMCU_CONTENT(A)->colind->ptr )
#define SMCU_INDEXPTRSp(A) ( (int*) SMCU_CONTENT(A)->rowptrs->ptr )
#define SMCU_MEMHELP(A) ( SMCU_CONTENT(A)->mem_helper )
#define SMCU_MATDESCR(A) ( SMCU_CONTENT(A)->mat_descr )
#define SMCU_CUSPHANDLE(A) ( SMCU_CONTENT(A)->cusp_handle )
#define SMCU_FIXEDPATTERN(A)( SMCU_CONTENT(A)->fixed_pattern )
#define SMCU_EXECPOLICY(A) ( SMCU_CONTENT(A)->exec_policy )
/* ------------------------------------------------------------------
* Default execution policy definition.
*
* This policy tries to help us leverage the structure of the matrix.
* It will choose block sizes which are a multiple of the warp size,
* and it will choose a grid size such that all work elements are
* covered.
* ------------------------------------------------------------------ */
class SUNCuSparseMatrixExecPolicy : public SUNCudaExecPolicy
{
public:
SUNCuSparseMatrixExecPolicy(const cudaStream_t stream = 0)
: stream_(stream)
{}
SUNCuSparseMatrixExecPolicy(const SUNCuSparseMatrixExecPolicy& ex)
: stream_(ex.stream_)
{}
virtual size_t gridSize(size_t numWorkElements, size_t blockDim = 0) const
{
return(numWorkElements + blockDim - 1)/blockDim;
}
virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const
{
return(max_block_size(CUDA_WARP_SIZE*(numWorkElements + CUDA_WARP_SIZE - 1)/CUDA_WARP_SIZE));
}
virtual const cudaStream_t* stream() const
{
return(&stream_);
}
virtual CudaExecPolicy* clone() const
{
return(static_cast<CudaExecPolicy*>(new SUNCuSparseMatrixExecPolicy(*this)));
}
static size_t max_block_size(int val)
{
return((val > MAX_CUDA_BLOCKSIZE) ? MAX_CUDA_BLOCKSIZE : val );
}
private:
const cudaStream_t stream_;
};
/* ------------------------------------------------------------------
* Constructors.
* ------------------------------------------------------------------ */
SUNMatrix SUNMatrix_cuSparse_NewCSR(int M, int N, int NNZ, cusparseHandle_t cusp)
{
SUNMemory d_colind, d_rowptr, d_values;
int alloc_fail = 0;
/* return with NULL matrix on illegal input */
if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: illegal value(s) for M, N, or NNZ\n");
return(NULL);
}
SUNMatrix A = SUNMatrix_cuSparse_NewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
return(NULL);
}
SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda();
if (SMCU_MEMHELP(A) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
SUNMatDestroy(A);
return(NULL);
}
/* Allocate device memory for the matrix */
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_colind,
sizeof(int)*NNZ, SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_rowptr,
sizeof(int)*(M+1), SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_values,
sizeof(realtype)*NNZ, SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
/* Choose sensible defaults */
cusparseStatus_t cusparse_status = CUSPARSE_STATUS_SUCCESS;
cusparseMatDescr_t mat_descr;
cusparse_status = cusparseCreateMatDescr(&mat_descr);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = cusparseSetMatType(mat_descr, CUSPARSE_MATRIX_TYPE_GENERAL);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
cusparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = cusparseSetMatIndexBase(mat_descr, CUSPARSE_INDEX_BASE_ZERO);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
cusparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
cudaStream_t stream;
if (!SUNDIALS_CUSPARSE_VERIFY(cusparseGetStream(cusp, &stream)))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
cusparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
/* Fill the content */
SMCU_CONTENT(A)->M = M;
SMCU_CONTENT(A)->N = N;
SMCU_CONTENT(A)->NNZ = NNZ;
SMCU_CONTENT(A)->nblocks = 1;
SMCU_CONTENT(A)->blockrows = M;
SMCU_CONTENT(A)->blockcols = N;
SMCU_CONTENT(A)->blocknnz = NNZ;
SMCU_CONTENT(A)->own_matd = SUNTRUE;
SMCU_CONTENT(A)->own_exec = SUNTRUE;
SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_CSR;
SMCU_CONTENT(A)->colind = d_colind;
SMCU_CONTENT(A)->rowptrs = d_rowptr;
SMCU_CONTENT(A)->data = d_values;
SMCU_CONTENT(A)->mat_descr = mat_descr;
SMCU_CONTENT(A)->cusp_handle = cusp;
SMCU_CONTENT(A)->exec_policy = new SUNCuSparseMatrixExecPolicy(stream);
#if CUDART_VERSION >= 11000
cusparseSpMatDescr_t spmat_descr;
if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
{
SUNMatDestroy(A);
return(NULL);
}
SMCU_CONTENT(A)->spmat_descr = spmat_descr;
SMCU_CONTENT(A)->dBufferMem = NULL;
SMCU_CONTENT(A)->bufferSize = 0;
SMCU_CONTENT(A)->vecX = NULL;
SMCU_CONTENT(A)->vecY = NULL;
#endif
return A;
}
SUNMatrix SUNMatrix_cuSparse_MakeCSR(cusparseMatDescr_t mat_descr, int M, int N, int NNZ,
int *rowptrs , int *colind , realtype *data,
cusparseHandle_t cusp)
{
/* return with NULL matrix on illegal input */
if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: illegal value(s) for M, N, or NNZ\n");
return(NULL);
}
if ( (rowptrs == NULL) || (colind == NULL) || (data == NULL) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: rowptrs, colind, or data is NULL\n");
return(NULL);
}
if (cusparseGetMatIndexBase(mat_descr) != CUSPARSE_INDEX_BASE_ZERO)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: the cusparseMatDescr_t must have index base CUSPARSE_INDEX_BASE_ZERO\n");
return(NULL);
}
SUNMatrix A = SUNMatrix_cuSparse_NewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
return(NULL);
}
SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda();
if (SMCU_MEMHELP(A) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
SUNMatDestroy(A);
return(NULL);
}
cudaStream_t stream;
if (!SUNDIALS_CUSPARSE_VERIFY(cusparseGetStream(cusp, &stream)))
{
SUNMatDestroy(A);
return(NULL);
}
/* Fill content */
SMCU_CONTENT(A)->M = M;
SMCU_CONTENT(A)->N = N;
SMCU_CONTENT(A)->NNZ = NNZ;
SMCU_CONTENT(A)->nblocks = 1;
SMCU_CONTENT(A)->blockrows = M;
SMCU_CONTENT(A)->blockcols = N;
SMCU_CONTENT(A)->blocknnz = NNZ;
SMCU_CONTENT(A)->own_matd = SUNFALSE;
SMCU_CONTENT(A)->own_exec = SUNTRUE;
SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_CSR;
SMCU_CONTENT(A)->colind = SUNMemoryHelper_Wrap(colind, SUNMEMTYPE_DEVICE);
SMCU_CONTENT(A)->rowptrs = SUNMemoryHelper_Wrap(rowptrs, SUNMEMTYPE_DEVICE);
SMCU_CONTENT(A)->data = SUNMemoryHelper_Wrap(data, SUNMEMTYPE_DEVICE);
SMCU_CONTENT(A)->mat_descr = mat_descr;
SMCU_CONTENT(A)->cusp_handle = cusp;
SMCU_CONTENT(A)->exec_policy = new SUNCuSparseMatrixExecPolicy(stream);
if (SMCU_CONTENT(A)->colind == NULL ||
SMCU_CONTENT(A)->rowptrs == NULL ||
SMCU_CONTENT(A)->data == NULL)
{
SUNMatDestroy(A);
return(NULL);
}
#if CUDART_VERSION >= 11000
cusparseSpMatDescr_t spmat_descr;
if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
{
SUNMatDestroy(A);
return(NULL);
}
SMCU_CONTENT(A)->spmat_descr = spmat_descr;
SMCU_CONTENT(A)->dBufferMem = NULL;
SMCU_CONTENT(A)->bufferSize = 0;
SMCU_CONTENT(A)->vecX = NULL;
SMCU_CONTENT(A)->vecY = NULL;
#endif
return(A);
}
SUNMatrix SUNMatrix_cuSparse_NewBlockCSR(int nblocks, int blockrows, int blockcols, int blocknnz, cusparseHandle_t cusp)
{
SUNMemory d_colind, d_rowptr, d_values;
int M, N, NNZ;
int alloc_fail = 0;
/* Return with NULL matrix on illegal input */
if (blockrows != blockcols)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: matrix must be square for the BCSR format\n");
return(NULL);
}
M = nblocks * blockrows;
N = M;
NNZ = nblocks * blocknnz;
/* Return with NULL matrix on illegal input */
if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: illegal value(s) for M, N, or NNZ\n");
return(NULL);
}
/* Allocate the SUNMatrix object */
SUNMatrix A = SUNMatrix_cuSparse_NewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
return(NULL);
}
SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda();
if (SMCU_MEMHELP(A) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
SUNMatDestroy(A);
return(NULL);
}
/* Allocate device memory for the matrix */
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_colind,
sizeof(int)*blocknnz, SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_rowptr,
sizeof(int)*(blockrows + 1),
SUNMEMTYPE_DEVICE);
alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_values,
sizeof(realtype)*blocknnz*nblocks,
SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
/* Choose sensible defaults */
cusparseStatus_t cusparse_status = CUSPARSE_STATUS_SUCCESS;
cusparseMatDescr_t mat_descr;
cusparse_status = cusparseCreateMatDescr(&mat_descr);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = cusparseSetMatType(mat_descr, CUSPARSE_MATRIX_TYPE_GENERAL);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
cusparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
cusparse_status = cusparseSetMatIndexBase(mat_descr, CUSPARSE_INDEX_BASE_ZERO);
if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
cusparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
cudaStream_t stream;
if (!SUNDIALS_CUSPARSE_VERIFY(cusparseGetStream(cusp, &stream)))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values);
cusparseDestroyMatDescr(mat_descr);
SUNMatDestroy(A);
return(NULL);
}
/* Fill the content */
SMCU_CONTENT(A)->M = M;
SMCU_CONTENT(A)->N = N;
SMCU_CONTENT(A)->NNZ = NNZ;
SMCU_CONTENT(A)->nblocks = nblocks;
SMCU_CONTENT(A)->blockrows = blockrows;
SMCU_CONTENT(A)->blockcols = blockcols;
SMCU_CONTENT(A)->blocknnz = blocknnz;
SMCU_CONTENT(A)->own_matd = SUNTRUE;
SMCU_CONTENT(A)->own_exec = SUNTRUE;
SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
SMCU_CONTENT(A)->cusp_handle = cusp;
SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_BCSR;
SMCU_CONTENT(A)->colind = d_colind;
SMCU_CONTENT(A)->rowptrs = d_rowptr;
SMCU_CONTENT(A)->data = d_values;
SMCU_CONTENT(A)->mat_descr = mat_descr;
SMCU_CONTENT(A)->exec_policy = new SUNCuSparseMatrixExecPolicy(stream);
#if CUDART_VERSION >= 11000
cusparseSpMatDescr_t spmat_descr;
if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
{
SUNMatDestroy(A);
return(NULL);
}
SMCU_CONTENT(A)->spmat_descr = spmat_descr;
SMCU_CONTENT(A)->dBufferMem = NULL;
SMCU_CONTENT(A)->bufferSize = 0;
SMCU_CONTENT(A)->vecX = NULL;
SMCU_CONTENT(A)->vecY = NULL;
#endif
return(A);
}
/* ------------------------------------------------------------------
* Implementation specific routines.
* ------------------------------------------------------------------ */
int SUNMatrix_cuSparse_SparseType(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_SPARSETYPE(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_Rows(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_ROWS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_Columns(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_COLUMNS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_NNZ(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_NNZ(A));
else
return(SUNMAT_ILL_INPUT);
}
int* SUNMatrix_cuSparse_IndexPointers(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_INDEXPTRSp(A));
else
return(NULL);
}
int* SUNMatrix_cuSparse_IndexValues(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_INDEXVALSp(A));
else
return(NULL);
}
realtype* SUNMatrix_cuSparse_Data(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_DATAp(A));
else
return(NULL);
}
int SUNMatrix_cuSparse_NumBlocks(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_NBLOCKS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_BlockRows(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_BLOCKROWS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_BlockColumns(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_BLOCKCOLS(A));
else
return(SUNMAT_ILL_INPUT);
}
int SUNMatrix_cuSparse_BlockNNZ(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_BLOCKNNZ(A));
else
return(SUNMAT_ILL_INPUT);
}
realtype* SUNMatrix_cuSparse_BlockData(SUNMatrix A, int blockidx)
{
realtype *matdata;
int offset;
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE)
return(NULL);
if (blockidx >= SMCU_NBLOCKS(A))
return(NULL);
matdata = SMCU_DATAp(A);
offset = SMCU_BLOCKNNZ(A)*blockidx;
return(&matdata[offset]);
}
cusparseMatDescr_t SUNMatrix_cuSparse_MatDescr(SUNMatrix A)
{
if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE)
return(SMCU_MATDESCR(A));
else
return(NULL);
}
int SUNMatrix_cuSparse_SetFixedPattern(SUNMatrix A, booleantype yesno)
{
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE)
return(SUNMAT_ILL_INPUT);
SMCU_FIXEDPATTERN(A) = yesno;
return(SUNMAT_SUCCESS);
}
int SUNMatrix_cuSparse_SetKernelExecPolicy(SUNMatrix A, SUNCudaExecPolicy* exec_policy)
{
if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE || exec_policy == NULL)
return(SUNMAT_ILL_INPUT);
if (SMCU_OWNEXEC(A)) delete SMCU_EXECPOLICY(A);
SMCU_EXECPOLICY(A) = exec_policy;
SMCU_OWNEXEC(A) = SUNFALSE;
return(SUNMAT_SUCCESS);
}
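/* Usage sketch for SUNMatrix_cuSparse_SetKernelExecPolicy. It assumes the
   SUNCudaThreadDirectExecPolicy class from sundials_cuda_policies.hpp and an existing
   cudaStream_t `stream`; any other SUNCudaExecPolicy implementation works the same way.
   The matrix does NOT take ownership of a user-supplied policy (own_exec is cleared),
   so the caller must keep it alive for the lifetime of A and free it afterwards:

     SUNCudaExecPolicy* policy = new SUNCudaThreadDirectExecPolicy(256, stream);
     if (SUNMatrix_cuSparse_SetKernelExecPolicy(A, policy) != SUNMAT_SUCCESS)
       delete policy;            -- attach failed, still owned by the caller
     ...
     SUNMatDestroy(A);           -- does not delete the user-supplied policy
     delete policy;
*/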
int SUNMatrix_cuSparse_CopyToDevice(SUNMatrix dA, realtype* h_data,
int* h_idxptrs, int* h_idxvals)
{
int retval;
SUNMemory _h_data, _h_idxptrs, _h_idxvals;
const cudaStream_t* stream;
int nidxvals, nidxptrs;
if (SUNMatGetID(dA) != SUNMATRIX_CUSPARSE)
return(SUNMAT_ILL_INPUT);
stream = SMCU_EXECPOLICY(dA)->stream();
if (h_data != NULL)
{
_h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
SMCU_DATA(dA),
_h_data,
SMCU_NNZ(dA)*sizeof(realtype),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_data);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
switch(SMCU_SPARSETYPE(dA))
{
case SUNMAT_CUSPARSE_CSR:
nidxptrs = SMCU_ROWS(dA)+1;
nidxvals = SMCU_NNZ(dA);
break;
case SUNMAT_CUSPARSE_BCSR:
nidxptrs = SMCU_BLOCKROWS(dA)+1;
nidxvals = SMCU_BLOCKNNZ(dA);
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_CopyToDevice: unrecognized sparse type\n");
return(SUNMAT_ILL_INPUT);
}
if (h_idxptrs != NULL)
{
_h_idxptrs = SUNMemoryHelper_Wrap(h_idxptrs, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
SMCU_INDEXPTRS(dA),
_h_idxptrs,
nidxptrs*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxptrs);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
if (h_idxvals != NULL)
{
_h_idxvals = SUNMemoryHelper_Wrap(h_idxvals, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
SMCU_INDEXVALS(dA),
_h_idxvals,
nidxvals*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxvals);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
return(SUNMAT_SUCCESS);
}
int SUNMatrix_cuSparse_CopyFromDevice(SUNMatrix dA, realtype* h_data,
int* h_idxptrs, int* h_idxvals)
{
int retval;
SUNMemory _h_data, _h_idxptrs, _h_idxvals;
const cudaStream_t* stream;
int nidxvals, nidxptrs;
if (SUNMatGetID(dA) != SUNMATRIX_CUSPARSE)
return(SUNMAT_ILL_INPUT);
stream = SMCU_EXECPOLICY(dA)->stream();
if (h_data != NULL)
{
_h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
_h_data,
SMCU_DATA(dA),
SMCU_NNZ(dA)*sizeof(realtype),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_data);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
  switch(SMCU_SPARSETYPE(dA))
  {
    case SUNMAT_CUSPARSE_CSR:
      nidxptrs = SMCU_ROWS(dA)+1;
      nidxvals = SMCU_NNZ(dA);
      break;
    case SUNMAT_CUSPARSE_BCSR:
      nidxptrs = SMCU_BLOCKROWS(dA)+1;
      nidxvals = SMCU_BLOCKNNZ(dA);
      break;
    default:
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_CopyFromDevice: unrecognized sparse type\n");
      return(SUNMAT_ILL_INPUT);
  }
if (h_idxptrs != NULL)
{
_h_idxptrs = SUNMemoryHelper_Wrap(h_idxptrs, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
_h_idxptrs,
SMCU_INDEXPTRS(dA),
nidxptrs*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxptrs);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
if (h_idxvals != NULL)
{
_h_idxvals = SUNMemoryHelper_Wrap(h_idxvals, SUNMEMTYPE_HOST);
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA),
_h_idxvals,
SMCU_INDEXVALS(dA),
nidxvals*sizeof(int),
(void*) stream);
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxvals);
if (retval != 0) return(SUNMAT_OPERATION_FAIL);
}
return(SUNMAT_SUCCESS);
}
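/* Usage sketch for the two copy routines above. The host arrays are assumed to hold a
   zero-indexed CSR matrix with the same M, N and NNZ as A, and `cusp` is an existing
   cusparseHandle_t. Both routines only enqueue asynchronous copies on the matrix
   exec-policy stream, so the host buffers must remain valid until the stream (or the
   device) is synchronized:

     SUNMatrix A = SUNMatrix_cuSparse_NewCSR(M, N, NNZ, cusp);
     SUNMatrix_cuSparse_CopyToDevice(A, h_data, h_rowptrs, h_colind);    -- host to device
     ...                                                                 -- use A in a solver
     SUNMatrix_cuSparse_CopyFromDevice(A, h_data, h_rowptrs, h_colind);  -- device to host
     cudaDeviceSynchronize();
*/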
/*
* -----------------------------------------------------------------
* implementation of matrix operations
* -----------------------------------------------------------------
*/
SUNMatrix_ID SUNMatGetID_cuSparse(SUNMatrix A)
{
return(SUNMATRIX_CUSPARSE);
}
/* Returns a new matrix allocated to have the same structure as A,
but it does not copy any nonzeros, column vals, or row pointers. */
SUNMatrix SUNMatClone_cuSparse(SUNMatrix A)
{
SUNMatrix B;
switch (SMCU_SPARSETYPE(A))
{
case SUNMAT_CUSPARSE_CSR:
B = SUNMatrix_cuSparse_NewCSR(SMCU_ROWS(A), SMCU_COLUMNS(A), SMCU_NNZ(A),
SMCU_CUSPHANDLE(A));
break;
case SUNMAT_CUSPARSE_BCSR:
B = SUNMatrix_cuSparse_NewBlockCSR(SMCU_NBLOCKS(A), SMCU_BLOCKROWS(A), SMCU_BLOCKCOLS(A),
SMCU_BLOCKNNZ(A), SMCU_CUSPHANDLE(A));
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatClone_cuSparse: sparse type not recognized\n");
B = NULL;
}
if (B == NULL) return(NULL); /* unrecognized sparse type, do not dereference B */
SMCU_FIXEDPATTERN(B) = SMCU_FIXEDPATTERN(A);
delete SMCU_EXECPOLICY(B);
SMCU_EXECPOLICY(B) = SMCU_EXECPOLICY(A)->clone();
return(B);
}
/* Deallocates the SUNMatrix object and all data it owns */
void SUNMatDestroy_cuSparse(SUNMatrix A)
{
if (A == NULL) return;
/* free content */
if (A->content != NULL)
{
if (SMCU_MEMHELP(A))
{
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_DATA(A));
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_INDEXPTRS(A));
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_INDEXVALS(A));
}
else
{
SUNDIALS_DEBUG_PRINT("WARNING in SUNMatDestroy_cuSparse: mem_helper was NULL when trying to dealloc data, this could result in a memory leak\n");
}
if (SMCU_OWNMATD(A))
{
/* free cusparseMatDescr_t */
SUNDIALS_CUSPARSE_VERIFY( cusparseDestroyMatDescr(SMCU_MATDESCR(A)) );
}
#if CUDART_VERSION >= 11000
SUNDIALS_CUSPARSE_VERIFY( cusparseDestroyDnVec(SMCU_CONTENT(A)->vecX) );
SUNDIALS_CUSPARSE_VERIFY( cusparseDestroyDnVec(SMCU_CONTENT(A)->vecY) );
SUNDIALS_CUSPARSE_VERIFY( cusparseDestroySpMat(SMCU_CONTENT(A)->spmat_descr) );
SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_CONTENT(A)->dBufferMem);
#endif
if (SMCU_EXECPOLICY(A) && SMCU_OWNEXEC(A))
{
delete SMCU_EXECPOLICY(A);
SMCU_EXECPOLICY(A) = NULL;
}
SUNMemoryHelper_Destroy(SMCU_MEMHELP(A));
/* free content struct */
free(A->content);
A->content = NULL;
}
/* free ops and matrix */
if (A->ops) { free(A->ops); A->ops = NULL; }
free(A); A = NULL;
return;
}
/* Performs A_ij = 0 */
int SUNMatZero_cuSparse(SUNMatrix A)
{
cudaError_t cuerr;
cudaStream_t stream;
stream = *SMCU_EXECPOLICY(A)->stream();
/* set all data to zero */
cuerr = cudaMemsetAsync(SMCU_DATAp(A), 0, SMCU_NNZ(A)*sizeof(realtype), stream);
if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);
/* set all rowptrs to zero unless the sparsity pattern is fixed */
if (!SMCU_FIXEDPATTERN(A))
{
cuerr = cudaMemsetAsync(SMCU_INDEXPTRSp(A), 0,
(SMCU_BLOCKROWS(A)+1)*sizeof(int),
stream);
if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);
/* set all colind to zero */
cuerr = cudaMemsetAsync(SMCU_INDEXVALSp(A), 0,
SMCU_BLOCKNNZ(A)*sizeof(int),
stream);
if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);
}
return(SUNMAT_SUCCESS);
}
/* Copies the nonzeros, column vals, and row pointers into dst */
int SUNMatCopy_cuSparse(SUNMatrix src, SUNMatrix dst)
{
int retval;
const cudaStream_t* stream;
/* Verify that src and dst are compatible */
if (!SMCompatible_cuSparse(src, dst))
return(SUNMAT_ILL_INPUT);
stream = SMCU_EXECPOLICY(src)->stream();
/* Ensure that dst is allocated with at least as
much memory as we have nonzeros in src */
if (SMCU_NNZ(dst) < SMCU_NNZ(src))
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatCopy_cuSparse: the destination matrix has less nonzeros than the source\n");
return(SUNMAT_ILL_INPUT);
}
/* Zero out dst so that copy works correctly */
if (SUNMatZero_cuSparse(dst) != SUNMAT_SUCCESS)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatCopy_cuSparse: SUNMatZero_cuSparse failed\n");
return(SUNMAT_OPERATION_FAIL);
}
/* Copy the data over */
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src),
SMCU_DATA(dst),
SMCU_DATA(src),
SMCU_NNZ(src)*sizeof(realtype),
(void*) stream);
if (retval) return(SUNMAT_OPERATION_FAIL);
/* Copy the row pointers over */
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src),
SMCU_INDEXPTRS(dst),
SMCU_INDEXPTRS(src),
(SMCU_BLOCKROWS(src)+1)*sizeof(int),
(void*) stream);
if (retval) return(SUNMAT_OPERATION_FAIL);
/* Copy the column indices over */
retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src),
SMCU_INDEXVALS(dst),
SMCU_INDEXVALS(src),
SMCU_BLOCKNNZ(src)*sizeof(int),
(void*) stream);
if (retval) return(SUNMAT_OPERATION_FAIL);
return(SUNMAT_SUCCESS);
}
/* Performs A = cA + I. Requires the diagonal to be allocated already. */
int SUNMatScaleAddI_cuSparse(realtype c, SUNMatrix A)
{
unsigned threadsPerBlock, gridSize;
cudaStream_t stream = *SMCU_EXECPOLICY(A)->stream();
switch (SMCU_SPARSETYPE(A))
{
case SUNMAT_CUSPARSE_CSR:
/* Choose the grid size to be the number of rows in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per 2 columns. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
scaleAddIKernelCSR<realtype, int>
<<<gridSize, threadsPerBlock, 0, stream>>>(SMCU_ROWS(A),
c,
SMCU_DATAp(A),
SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A));
break;
case SUNMAT_CUSPARSE_BCSR:
/* Choose the grid size to be the number of blocks in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per row of the block. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_BLOCKROWS(A));
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_NBLOCKS(A)*SMCU_BLOCKROWS(A), threadsPerBlock);
scaleAddIKernelBCSR<realtype, int>
<<<gridSize, threadsPerBlock, 0, stream>>>(SMCU_BLOCKROWS(A),
SMCU_NBLOCKS(A),
SMCU_BLOCKNNZ(A),
c,
SMCU_DATAp(A),
SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A));
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAddI_cuSparse: sparse type not recognized\n");
return(SUNMAT_ILL_INPUT);
}
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
cudaDeviceSynchronize();
if (!SUNDIALS_CUDA_VERIFY(cudaGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif
return(SUNMAT_SUCCESS);
}
/* Performs A = cA + B */
int SUNMatScaleAdd_cuSparse(realtype c, SUNMatrix A, SUNMatrix B)
{
cudaStream_t stream;
unsigned threadsPerBlock, gridSize;
if (!SMCompatible_cuSparse(A, B))
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAdd_cuSparse: SUNMatScaleAdd_cuSparse failed\n");
return(SUNMAT_ILL_INPUT);
}
stream = *SMCU_EXECPOLICY(A)->stream();
switch (SMCU_SPARSETYPE(A))
{
case SUNMAT_CUSPARSE_CSR:
/* Choose the grid size to be the number of rows in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per 2 columns. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
scaleAddKernelCSR<realtype, int>
<<<gridSize, threadsPerBlock, 0, stream>>>(SMCU_NNZ(A),
c,
SMCU_DATAp(A),
SMCU_DATAp(B));
break;
case SUNMAT_CUSPARSE_BCSR:
/* Choose the grid size to be the number of blocks in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per row of the block. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_BLOCKROWS(A));
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_NBLOCKS(A)*SMCU_BLOCKROWS(A), threadsPerBlock);
scaleAddKernelCSR<realtype, int>
<<<gridSize, threadsPerBlock, 0, stream>>>(SMCU_NNZ(A),
c,
SMCU_DATAp(A),
SMCU_DATAp(B));
break;
default:
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAdd_cuSparse: sparse type not recognized\n");
return(SUNMAT_ILL_INPUT);
}
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
cudaDeviceSynchronize();
if (!SUNDIALS_CUDA_VERIFY(cudaGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif
return(SUNMAT_SUCCESS);
}
/* Setup buffers needed for Matvec */
int SUNMatMatvecSetup_cuSparse(SUNMatrix A)
{
#if CUDART_VERSION >= 11000
realtype placeholder[1];
const realtype one = ONE;
/* Check if setup has already been done */
if (!(SMCU_CONTENT(A)->matvec_issetup))
{
SUNDIALS_CUSPARSE_VERIFY( cusparseCreateDnVec(&SMCU_CONTENT(A)->vecX,
SMCU_COLUMNS(A),
placeholder, CUDA_R_XF) );
SUNDIALS_CUSPARSE_VERIFY( cusparseCreateDnVec(&SMCU_CONTENT(A)->vecY,
SMCU_ROWS(A),
placeholder, CUDA_R_XF) );
SUNDIALS_CUSPARSE_VERIFY(
cusparseSpMV_bufferSize(SMCU_CUSPHANDLE(A),
CUSPARSE_OPERATION_NON_TRANSPOSE,
&one, SMCU_CONTENT(A)->spmat_descr,
SMCU_CONTENT(A)->vecX, &one, SMCU_CONTENT(A)->vecY,
CUDA_R_XF, CUSPARSE_MV_ALG_DEFAULT,
&SMCU_CONTENT(A)->bufferSize) );
if ( SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &SMCU_CONTENT(A)->dBufferMem,
SMCU_CONTENT(A)->bufferSize, SUNMEMTYPE_DEVICE) )
return(SUNMAT_OPERATION_FAIL);
}
#endif
SMCU_CONTENT(A)->matvec_issetup = SUNTRUE;
return(SUNMAT_SUCCESS);
}
/* Perform y = Ax */
int SUNMatMatvec_cuSparse(SUNMatrix A, N_Vector x, N_Vector y)
{
/* Verify that the dimensions of A, x, and y agree */
if ( (SMCU_COLUMNS(A) != N_VGetLength(x)) ||
(SMCU_ROWS(A) != N_VGetLength(y)) )
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatMatvec_cuSparse: dimensions do not agree\n");
return(SUNMAT_ILL_INPUT);
}
realtype *d_xdata = N_VGetDeviceArrayPointer(x);
realtype *d_ydata = N_VGetDeviceArrayPointer(y);
if (SMCU_SPARSETYPE(A) == SUNMAT_CUSPARSE_CSR)
{
const realtype one = ONE;
/* Zero result vector */
N_VConst(ZERO, y);
#if CUDART_VERSION >= 11000
{
/* Setup matvec if it has not been done yet */
if (!SMCU_CONTENT(A)->matvec_issetup && SUNMatMatvecSetup_cuSparse(A))
{
return(SUNMAT_OPERATION_FAIL);
}
SUNDIALS_CUSPARSE_VERIFY( cusparseDnVecSetValues(SMCU_CONTENT(A)->vecX,
d_xdata) );
SUNDIALS_CUSPARSE_VERIFY( cusparseDnVecSetValues(SMCU_CONTENT(A)->vecY,
d_ydata) );
SUNDIALS_CUSPARSE_VERIFY( cusparseSpMV(SMCU_CUSPHANDLE(A),
CUSPARSE_OPERATION_NON_TRANSPOSE,
&one, SMCU_CONTENT(A)->spmat_descr,
SMCU_CONTENT(A)->vecX, &one,
SMCU_CONTENT(A)->vecY, CUDA_R_XF,
CUSPARSE_MV_ALG_DEFAULT,
SMCU_CONTENT(A)->dBufferMem->ptr) );
}
#else
SUNDIALS_CUSPARSE_VERIFY(
cusparseXcsrmv(SMCU_CUSPHANDLE(A), CUSPARSE_OPERATION_NON_TRANSPOSE,
SMCU_ROWS(A), SMCU_COLUMNS(A), SMCU_NNZ(A),
&one, SMCU_MATDESCR(A), SMCU_DATAp(A), SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A), d_xdata, &one, d_ydata) );
#endif
}
else if (SMCU_SPARSETYPE(A) == SUNMAT_CUSPARSE_BCSR)
{
cudaStream_t stream;
unsigned gridSize, threadsPerBlock;
stream = *SMCU_EXECPOLICY(A)->stream();
/* Choose the grid size to be the number of blocks in the matrix,
and then choose threadsPerBlock to be a multiple of the warp size
that results in enough threads to have one per row of the block. */
threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_BLOCKROWS(A));
gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_NBLOCKS(A)*SMCU_BLOCKROWS(A), threadsPerBlock);
matvecBCSR<realtype, int>
<<<gridSize, threadsPerBlock, 0, stream>>>(SMCU_BLOCKROWS(A),
SMCU_NBLOCKS(A),
SMCU_BLOCKNNZ(A),
SMCU_DATAp(A),
SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A),
d_xdata,
d_ydata);
}
else
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatMatvec_cuSparse: sparse type not recognized\n");
return(SUNMAT_ILL_INPUT);
}
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
cudaDeviceSynchronize();
if (!SUNDIALS_CUDA_VERIFY(cudaGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif
return(SUNMAT_SUCCESS);
}
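/* Usage sketch for the matvec above, assuming the CUDA N_Vector module is used (e.g.
   vectors created with N_VNew_Cuda) so that N_VGetDeviceArrayPointer returns device
   pointers:

     N_Vector x = N_VNew_Cuda(SUNMatrix_cuSparse_Columns(A));
     N_Vector y = N_VNew_Cuda(SUNMatrix_cuSparse_Rows(A));
     ...                                  -- fill x on the device
     SUNMatMatvecSetup(A);                -- optional; pre-allocates the cuSPARSE SpMV
                                             work buffer on CUDA 11+
     int flag = SUNMatMatvec(A, x, y);    -- y = A*x, SUNMAT_SUCCESS on success
*/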
/*
* =================================================================
* private functions
* =================================================================
*/
/* -----------------------------------------------------------------
* Function to check compatibility of two sparse SUNMatrix objects
*/
static booleantype SMCompatible_cuSparse(SUNMatrix A, SUNMatrix B)
{
/* both matrices must be sparse */
if ( (SUNMatGetID(A) != SUNMATRIX_CUSPARSE) ||
(SUNMatGetID(B) != SUNMATRIX_CUSPARSE) )
return(SUNFALSE);
/* both matrices must have the same shape and sparsity type */
if (SMCU_ROWS(A) != SMCU_ROWS(B))
return(SUNFALSE);
if (SMCU_COLUMNS(A) != SMCU_COLUMNS(B))
return(SUNFALSE);
if (SMCU_SPARSETYPE(A) != SMCU_SPARSETYPE(B))
return(SUNFALSE);
return(SUNTRUE);
}
/* -----------------------------------------------------------------
* Function to create empty SUNMatrix with ops attached and
* the content structure allocated.
*/
SUNMatrix SUNMatrix_cuSparse_NewEmpty()
{
/* Create an empty matrix object */
SUNMatrix A = NULL;
A = SUNMatNewEmpty();
if (A == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewEmpty: SUNMatNewEmpty failed\n");
return(NULL);
}
/* Attach operations */
A->ops->getid = SUNMatGetID_cuSparse;
A->ops->clone = SUNMatClone_cuSparse;
A->ops->destroy = SUNMatDestroy_cuSparse;
A->ops->zero = SUNMatZero_cuSparse;
A->ops->copy = SUNMatCopy_cuSparse;
A->ops->scaleadd = SUNMatScaleAdd_cuSparse;
A->ops->scaleaddi = SUNMatScaleAddI_cuSparse;
A->ops->matvecsetup = SUNMatMatvecSetup_cuSparse;
A->ops->matvec = SUNMatMatvec_cuSparse;
/* Create content */
SUNMatrix_Content_cuSparse content = NULL;
content = (SUNMatrix_Content_cuSparse) malloc(sizeof *content);
if (content == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewEmpty: failed to malloc content\n");
SUNMatDestroy(A);
return(NULL);
}
/* Attach content */
A->content = content;
content->mem_helper = NULL;
return(A);
}
#if CUDART_VERSION >= 11000
cusparseStatus_t CreateSpMatDescr(SUNMatrix A, cusparseSpMatDescr_t *spmat_descr)
{
/* CUDA 11 introduced the "Generic API" and removed the cusparseXcsrmv that
works on the old cusparseMatDescr_t and raw data arrays. However,
cuSolverSp stuff requires the cusparseMatDescr_t still. So, we have to
create this cusparseSpMatDescr_t *and* the cusparseMatDescr_t. */
return(cusparseCreateCsr(spmat_descr, SMCU_ROWS(A), SMCU_COLUMNS(A),
SMCU_NNZ(A), SMCU_INDEXPTRSp(A),
SMCU_INDEXVALSp(A), SMCU_DATAp(A),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_XF));
}
#endif
|
f2edd161a3139c2ce7a4004a98ce165539574e63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Fermat
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <pathtracer.h>
#include <renderer.h>
#include <mesh/MeshStorage.h>
#include <cugar/basic/timer.h>
#include <cugar/basic/primitives.h>
#include <cugar/basic/cuda/warp_atomics.h>
#include <cugar/basic/memory_arena.h>
#include <bsdf.h>
#include <edf.h>
#include <mis_utils.h>
#include <bpt_utils.h>
#include <eaw.h>
#include <vector>
#define SHIFT_RES 256u
#define SHADE_HITS_BLOCKSIZE 64
#define SHADE_HITS_CTA_BLOCKS 16
#define DEBUG_PIXEL (187 + 826 * 1600)
#define MIS_HEURISTIC POWER_HEURISTIC
namespace {
union PixelInfo
{
FERMAT_HOST_DEVICE PixelInfo() {}
FERMAT_HOST_DEVICE PixelInfo(const uint32 _packed) : packed(_packed) {}
FERMAT_HOST_DEVICE PixelInfo(const uint32 _pixel, const uint32 _comp) : pixel(_pixel), comp(_comp) {}
uint32 packed;
struct
{
uint32 pixel : 28;
uint32 comp : 4;
};
};
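// The bit-field above packs the pixel index (28 bits) and the BSDF component mask
// (4 bits) into a single 32-bit word, so a queue entry needs only one uint32.
// A minimal sketch of the intended round trip (pixel_index is a placeholder and must
// be < 2^28; the mask values are the Bsdf component masks used further below):
//
//   PixelInfo info(pixel_index, Bsdf::kDiffuseMask);  // pack when enqueuing
//   uint32    raw = info.packed;                      // what the queues store
//   PixelInfo back(raw);                              // unpack when dequeuing
//   // back.pixel == pixel_index, back.comp == Bsdf::kDiffuseMask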
struct PTRayQueue
{
Ray* rays;
Hit* hits;
float4* weights; // path weight
float4* weights_d; // diffuse path weight
float4* weights_g; // glossy path weight
uint32* pixels;
uint32* size;
FERMAT_DEVICE
void warp_append(const PixelInfo pixel, const Ray& ray, const float4 weight)
{
cugar::cuda::warp_static_atomic atomic_adder(size);
uint32 slot;
atomic_adder.add<1>(true, &slot);
//slot = atomicAdd(size, 1u);
rays[slot] = ray;
weights[slot] = weight;
pixels[slot] = pixel.packed;
}
FERMAT_DEVICE
void warp_append(const PixelInfo pixel, const Ray& ray, const float4 weight, const float4 weight_d, const float4 weight_g)
{
cugar::cuda::warp_static_atomic atomic_adder(size);
uint32 slot;
atomic_adder.add<1>(true, &slot);
//slot = atomicAdd(size, 1u);
rays[slot] = ray;
weights[slot] = weight;
weights_d[slot] = weight_d;
weights_g[slot] = weight_g;
pixels[slot] = pixel.packed;
}
};
struct PathTracingContext
{
PTOptions options;
uint32 in_bounce;
TiledSequenceView sequence;
PTRayQueue in_queue;
PTRayQueue shadow_queue;
PTRayQueue scatter_queue;
};
//------------------------------------------------------------------------------
__global__ void generate_primary_rays_kernel(PathTracingContext context, RendererView renderer, cugar::Vector3f U, cugar::Vector3f V, cugar::Vector3f W)
{
int pixel_x = threadIdx.x + blockIdx.x*blockDim.x;
int pixel_y = threadIdx.y + blockIdx.y*blockDim.y;
if (pixel_x >= renderer.res_x || pixel_y >= renderer.res_y)
return;
int idx = pixel_x + pixel_y*renderer.res_x;
// use an optimized sampling pattern to rotate a Halton sequence
const cugar::Vector2f uv(
context.sequence.sample_2d(pixel_x, pixel_y, 0),
context.sequence.sample_2d(pixel_x, pixel_y, 1));
const float2 d = make_float2(
(pixel_x + uv.x) / float(renderer.res_x),
(pixel_y + uv.y) / float(renderer.res_y)) * 2.f - 1.f;
// write the pixel index
context.in_queue.pixels[idx] = idx;
float3 ray_origin = renderer.camera.eye;
float3 ray_direction = d.x*U + d.y*V + W;
reinterpret_cast<float4*>(context.in_queue.rays)[2 * idx + 0] = make_float4(ray_origin.x, ray_origin.y, ray_origin.z, 0.0f); // origin, tmin
reinterpret_cast<float4*>(context.in_queue.rays)[2 * idx + 1] = make_float4(ray_direction.x, ray_direction.y, ray_direction.z, 1e34f); // dir, tmax
// write the filter weight
context.in_queue.weights[idx] = cugar::Vector4f(1.0f, 1.0f, 1.0f, 1.0f);
if (idx == 0)
*context.in_queue.size = renderer.res_x * renderer.res_y;
}
//------------------------------------------------------------------------------
void generate_primary_rays(PathTracingContext context, const RendererView renderer)
{
cugar::Vector3f U, V, W;
camera_frame(renderer.camera, renderer.aspect, U, V, W);
dim3 blockSize(32, 16);
dim3 gridSize(cugar::divide_ri(renderer.res_x, blockSize.x), cugar::divide_ri(renderer.res_y, blockSize.y));
generate_primary_rays_kernel << < gridSize, blockSize >> > (context, renderer, U, V, W);
}
//------------------------------------------------------------------------------
template <uint32 NUM_WARPS>
__global__
__launch_bounds__(SHADE_HITS_BLOCKSIZE, SHADE_HITS_CTA_BLOCKS)
void shade_hits_kernel(const uint32 in_queue_size, PathTracingContext context, RendererView renderer, const float frame_weight, const bool do_nee, const bool do_accumulate_emissive, const bool do_scatter)
{
const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id < in_queue_size) // *context.in_queue.size
{
const PixelInfo pixel_info = context.in_queue.pixels[thread_id];
const Ray ray = context.in_queue.rays[thread_id];
const Hit hit = context.in_queue.hits[thread_id];
const cugar::Vector4f w = context.in_queue.weights[thread_id];
const float p_prev = w.w;
const uint32 pixel_x = pixel_info.pixel % renderer.res_x;
const uint32 pixel_y = pixel_info.pixel / renderer.res_x;
// initialize our shifted sampling sequence
float samples[6];
for (uint32 i = 0; i < 6; ++i)
samples[i] = context.sequence.sample_2d(pixel_x, pixel_y, (context.in_bounce + 1) * 6 + i);
if (hit.t > 0.0f && hit.triId >= 0)
{
EyeVertex ev;
ev.setup(ray, hit, w.xyz(), cugar::Vector4f(0.0f), context.in_bounce, renderer);
// write out gbuffer information
if (context.in_bounce == 0)
{
renderer.fb.gbuffer.geo(pixel_info.pixel) = GBufferView::pack_geometry(ev.geom.position, ev.geom.normal_s);
renderer.fb.gbuffer.uv(pixel_info.pixel) = make_float4(hit.u, hit.v, ev.geom.texture_coords.x, ev.geom.texture_coords.y);
renderer.fb.gbuffer.tri(pixel_info.pixel) = hit.triId;
}
cugar::Vector3f in = -cugar::normalize(cugar::Vector3f(ray.dir));
// perform next-event estimation to compute direct lighting
if (do_nee)
{
// fetch the sampling dimensions
const float z[3] = { samples[0], samples[1], samples[2] }; // use dimensions 0,1,2
VertexGeometryId light_vertex;
VertexGeometry light_vertex_geom;
float light_pdf;
Edf light_edf;
// sample the light source surface
renderer.mesh_vpls.sample(z, &light_vertex.prim_id, &light_vertex.uv, &light_vertex_geom, &light_pdf, &light_edf);
//renderer.mesh_light.sample(z, &light_vertex.prim_id, &light_vertex.uv, &light_vertex_geom, &light_pdf, &light_edf);
// join the light sample with the current vertex
cugar::Vector3f out = (light_vertex_geom.position - ev.geom.position);
const float d2 = fmaxf(1.0e-8f, cugar::square_length(out));
// normalize the outgoing direction
out *= rsqrtf(d2);
cugar::Vector3f f_s_comp[Bsdf::kNumComponents];
float p_s_comp[Bsdf::kNumComponents];
ev.bsdf.f_and_p(ev.geom, ev.in, out, f_s_comp, p_s_comp, cugar::kProjectedSolidAngle);
cugar::Vector3f f_s(0.0f);
float p_s(0.0f);
for (uint32 i = 0; i < Bsdf::kNumComponents; ++i)
{
f_s += f_s_comp[i];
p_s += p_s_comp[i];
}
// evaluate the light's EDF and the surface BSDF
const cugar::Vector3f f_L = light_edf.f(light_vertex_geom, light_vertex_geom.position, -out) / light_pdf;
// evaluate the geometric term
const float G = fabsf(cugar::dot(out, ev.geom.normal_s) * cugar::dot(out, light_vertex_geom.normal_s)) / d2;
// TODO: perform MIS with the possibility of directly hitting the light source
const float p1 = light_pdf;
const float p2 = p_s * G;
const float mis_w =
(context.in_bounce == 0 && context.options.direct_lighting_bsdf) ||
(context.in_bounce > 0 && context.options.indirect_lighting_bsdf) ? mis_heuristic<MIS_HEURISTIC>(p1, p2) : 1.0f;
// calculate the cumulative sample weight, equal to f_L * f_s * G / p
const cugar::Vector3f out_w = w.xyz() * f_L * f_s * G * mis_w;
const cugar::Vector3f out_w_d = (context.in_bounce == 0 ? f_s_comp[Bsdf::kDiffuseReflectionIndex] + f_s_comp[Bsdf::kDiffuseTransmissionIndex] : f_s) * w.xyz() * f_L * G * mis_w;
const cugar::Vector3f out_w_g = (context.in_bounce == 0 ? f_s_comp[Bsdf::kGlossyReflectionIndex] + f_s_comp[Bsdf::kGlossyTransmissionIndex] : f_s) * w.xyz() * f_L * G * mis_w;
if (cugar::max_comp(out_w) > 0.0f && cugar::is_finite(out_w))
{
// enqueue the output ray
Ray out_ray;
out_ray.origin = ev.geom.position - ray.dir * 1.0e-4f; // shift back in space along the viewing direction
out_ray.dir = (light_vertex_geom.position - out_ray.origin); //out;
out_ray.tmin = 0.0f;
out_ray.tmax = 0.9999f; //d * 0.9999f;
const PixelInfo out_pixel = pixel_info;
context.shadow_queue.warp_append(
out_pixel,
out_ray,
cugar::Vector4f(out_w, 0.0f),
cugar::Vector4f(out_w_d, 0.0f),
cugar::Vector4f(out_w_g, 0.0f) );
}
}
// accumulate the emissive component along the incoming direction
if (do_accumulate_emissive)
{
VertexGeometry light_vertex_geom = ev.geom;
float light_pdf;
Edf light_edf;
renderer.mesh_vpls.map(hit.triId, cugar::Vector2f(hit.u, hit.v), light_vertex_geom, &light_pdf, &light_edf);
//renderer.mesh_light.map(hit.triId, cugar::Vector2f(hit.u, hit.v), light_vertex_geom, &light_pdf, &light_edf);
// evaluate the edf's output along the incoming direction
const cugar::Vector3f f_L = light_edf.f(light_vertex_geom, light_vertex_geom.position, ev.in);
const float d2 = fmaxf(1.0e-10f, hit.t * hit.t);
// compute the MIS weight with next event estimation at the previous vertex
const float G_partial = fabsf(cugar::dot(ev.in, light_vertex_geom.normal_s)) / d2; // NOTE: G_partial doesn't include the dot product between 'in and the normal at the previous vertex
const float p1 = G_partial * p_prev; // NOTE: p_prev is the solid angle probability of sampling the BSDF at the previous vertex, i.e. p_proj * dot(in,normal)
const float p2 = light_pdf;
const float mis_w =
(context.in_bounce == 1 && context.options.direct_lighting_nee) ||
(context.in_bounce > 1 && context.options.indirect_lighting_nee) ? mis_heuristic<MIS_HEURISTIC>(p1, p2) : 1.0f;
// compute the weighted contribution
const cugar::Vector3f out_w = w.xyz() * f_L * mis_w;
// and accumulate the weighted contribution
if (cugar::max_comp(out_w) > 0.0f && cugar::is_finite(out_w))
{
// accumulate to the image
add_in<false>(renderer.fb(FBufferDesc::COMPOSITED_C), pixel_info.pixel, out_w, frame_weight);
// accumulate the per-component value to the proper output channel
if (context.in_bounce == 0)
add_in<false>(renderer.fb(FBufferDesc::DIRECT_C), pixel_info.pixel, out_w, frame_weight);
else
{
if (pixel_info.comp & Bsdf::kDiffuseMask) add_in<true>(renderer.fb(FBufferDesc::DIFFUSE_C), pixel_info.pixel, out_w, frame_weight);
if (pixel_info.comp & Bsdf::kGlossyMask) add_in<true>(renderer.fb(FBufferDesc::SPECULAR_C), pixel_info.pixel, out_w, frame_weight);
}
}
}
// trace a bounce ray
if (do_scatter)
{
// fetch the sampling dimensions
const float z[3] = { samples[3], samples[4], samples[5] }; // use dimensions 3,4,5
// sample a scattering event
cugar::Vector3f out(0.0f);
cugar::Vector3f g(0.0f);
float p(0.0f);
float p_proj(0.0f);
Bsdf::ComponentType out_comp(Bsdf::kAbsorption);
scatter(ev, z, out_comp, out, p, p_proj, g, true, false);
cugar::Vector3f out_w = g * w.xyz();
if (context.in_bounce == 0)
{
renderer.fb(FBufferDesc::DIFFUSE_A, pixel_info.pixel) += cugar::Vector4f(ev.material.diffuse) * frame_weight;
renderer.fb(FBufferDesc::SPECULAR_A, pixel_info.pixel) += (cugar::Vector4f(ev.material.specular) + cugar::Vector4f(1.0f))*0.5f * frame_weight;
}
if (cugar::max_comp(out_w) > 0.0f && cugar::is_finite(out_w))
{
// enqueue the output ray
Ray out_ray;
out_ray.origin = ev.geom.position;
out_ray.dir = out;
out_ray.tmin = 1.0e-4f;
out_ray.tmax = 1.0e8f;
const float out_p = p;
const PixelInfo out_pixel = context.in_bounce ?
pixel_info : // if this sample is a secondary bounce, use the previously selected channel
PixelInfo(pixel_info.pixel, out_comp); // otherwise (i.e. this is the first bounce) choose the output channel for the rest of the path
context.scatter_queue.warp_append(
out_pixel,
out_ray,
cugar::Vector4f(out_w, out_p) );
}
}
}
else
{
// hit the environment - perform sky lighting
}
}
}
void shade_hits(const uint32 in_queue_size, PathTracingContext context, RendererView renderer)
{
const uint32 blockSize(SHADE_HITS_BLOCKSIZE);
const dim3 gridSize(cugar::divide_ri(in_queue_size, blockSize));
// decide whether to perform next-event estimation
const bool do_nee =
((context.in_bounce + 2 <= context.options.max_path_length) &&
((context.in_bounce == 0 && context.options.direct_lighting_nee) ||
(context.in_bounce > 0 && context.options.indirect_lighting_nee)));
// decide whether to evaluate and accumulate emissive surfaces
const bool do_accumulate_emissive =
((context.in_bounce == 0 && context.options.visible_lights) ||
(context.in_bounce == 1 && context.options.direct_lighting_bsdf) ||
(context.in_bounce > 1 && context.options.indirect_lighting_bsdf));
// compute the number of path vertices we want to generate from the eye
const uint32 max_path_vertices = context.options.max_path_length +
((context.options.max_path_length == 2 && context.options.direct_lighting_bsdf) ||
(context.options.max_path_length > 2 && context.options.indirect_lighting_bsdf) ? 1 : 0);
// decide whether to perform scattering
const bool do_scatter = (context.in_bounce + 2 < max_path_vertices);
shade_hits_kernel<blockSize / 32> << < gridSize, blockSize >> > (in_queue_size, context, renderer, 1.0f / float(renderer.instance + 1), do_nee, do_accumulate_emissive, do_scatter);
}
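// Worked example of the three flags above (the option values are just an illustrative
// configuration; the numbers follow directly from the expressions in shade_hits):
// with max_path_length = 2, direct_lighting_nee = true, direct_lighting_bsdf = false
// and visible_lights = true, bounce 0 evaluates to
//   do_nee                 = (0 + 2 <= 2) && direct_lighting_nee = true
//   do_accumulate_emissive = visible_lights = true
//   max_path_vertices      = 2 + 0 = 2, so do_scatter = (0 + 2 < 2) = false
// i.e. the primary hit performs next-event estimation and shows emitters directly,
// and no secondary bounce rays are queued.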
__global__
void solve_occlusion_kernel(const uint32 in_queue_size, PathTracingContext context, RendererView renderer, const float frame_weight)
{
const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id < in_queue_size) // *context.shadow_queue.size
{
const PixelInfo pixel_info = context.shadow_queue.pixels[thread_id];
const Hit hit = context.shadow_queue.hits[thread_id];
const cugar::Vector4f w = context.shadow_queue.weights[thread_id];
const cugar::Vector4f w_d = context.shadow_queue.weights_d[thread_id];
const cugar::Vector4f w_g = context.shadow_queue.weights_g[thread_id];
// TODO: break this up in separate diffuse and specular components
if (hit.t < 0.0f)
{
add_in<false>( renderer.fb(FBufferDesc::COMPOSITED_C), pixel_info.pixel, w.xyz(), frame_weight );
if (context.in_bounce == 0)
{
// accumulate the per-component values to the respective output channels
add_in<true>( renderer.fb(FBufferDesc::DIFFUSE_C), pixel_info.pixel, w_d.xyz(), frame_weight );
add_in<true>( renderer.fb(FBufferDesc::SPECULAR_C), pixel_info.pixel, w_g.xyz(), frame_weight );
}
else
{
// accumulate the per-component value to the proper output channel
if (pixel_info.comp & Bsdf::kDiffuseMask) add_in<true>( renderer.fb(FBufferDesc::DIFFUSE_C), pixel_info.pixel, w_d.xyz(), frame_weight );
if (pixel_info.comp & Bsdf::kGlossyMask) add_in<true>( renderer.fb(FBufferDesc::SPECULAR_C), pixel_info.pixel, w_g.xyz(), frame_weight );
}
}
}
}
void solve_occlusion(const uint32 in_queue_size, PathTracingContext context, RendererView renderer)
{
const uint32 blockSize(128);
const dim3 gridSize(cugar::divide_ri(in_queue_size, blockSize));
solve_occlusion_kernel << < gridSize, blockSize >> > (in_queue_size, context, renderer, 1.0f / float(renderer.instance + 1) );
}
void alloc_queues(
PTOptions options,
const uint32 n_pixels,
PTRayQueue& input_queue,
PTRayQueue& scatter_queue,
PTRayQueue& shadow_queue,
cugar::memory_arena& arena)
{
input_queue.rays = arena.alloc<Ray>(n_pixels);
input_queue.hits = arena.alloc<Hit>(n_pixels);
input_queue.weights = arena.alloc<float4>(n_pixels);
input_queue.weights_d = NULL;
input_queue.weights_g = NULL;
input_queue.pixels = arena.alloc<uint32>(n_pixels);
input_queue.size = arena.alloc<uint32>(1);
scatter_queue.rays = arena.alloc<Ray>(n_pixels);
scatter_queue.hits = arena.alloc<Hit>(n_pixels);
scatter_queue.weights = arena.alloc<float4>(n_pixels);
scatter_queue.weights_d = NULL;
scatter_queue.weights_g = NULL;
scatter_queue.pixels = arena.alloc<uint32>(n_pixels);
scatter_queue.size = arena.alloc<uint32>(1);
shadow_queue.rays = arena.alloc<Ray>(n_pixels);
shadow_queue.hits = arena.alloc<Hit>(n_pixels);
shadow_queue.weights = arena.alloc<float4>(n_pixels);
shadow_queue.weights_d = arena.alloc<float4>(n_pixels);
shadow_queue.weights_g = arena.alloc<float4>(n_pixels);
shadow_queue.pixels = arena.alloc<uint32>(n_pixels);
shadow_queue.size = arena.alloc<uint32>(1);
}
} // anonymous namespace
PathTracer::PathTracer() :
m_generator(32, cugar::LFSRGeneratorMatrix::GOOD_PROJECTIONS),
m_random(&m_generator, 1u, 1351u)
{
}
void PathTracer::init(int argc, char** argv, Renderer& renderer)
{
const uint2 res = renderer.res();
const uint32 n_pixels = res.x * res.y;
// parse the options
m_options.parse(argc, argv);
// pre-alloc queue storage
{
// determine how much storage we will need
cugar::memory_arena arena;
PTRayQueue input_queue;
PTRayQueue scatter_queue;
PTRayQueue shadow_queue;
alloc_queues(
m_options,
n_pixels,
input_queue,
scatter_queue,
shadow_queue,
arena );
fprintf(stderr, " allocating queue storage: %.1f MB\n", float(arena.size) / (1024*1024));
m_memory_pool.alloc(arena.size);
}
// build the set of shifts
const uint32 n_dimensions = 6 * (m_options.max_path_length + 1);
fprintf(stderr, " initializing sampler: %u dimensions\n", n_dimensions);
m_sequence.setup(n_dimensions, SHIFT_RES);
const uint32 n_light_paths = n_pixels;
fprintf(stderr, " creating mesh lights... started\n");
// initialize the mesh lights sampler
renderer.m_mesh_lights.init( n_light_paths, renderer.m_mesh.view(), renderer.m_mesh_d.view(), renderer.m_texture_views_h.ptr(), renderer.m_texture_views_d.ptr() );
fprintf(stderr, " creating mesh lights... done\n");
}
void PathTracer::render(const uint32 instance, Renderer& renderer)
{
// pre-multiply the previous frame for blending
renderer.rescale_frame( instance );
//fprintf(stderr, "render started (%u)\n", instance);
const uint2 res = renderer.res();
const uint32 n_pixels = res.x * res.y;
cugar::memory_arena arena( m_memory_pool.ptr() );
PTRayQueue queue1;
PTRayQueue queue2;
PTRayQueue shadow_queue;
alloc_queues(
m_options,
n_pixels,
queue1,
queue2,
shadow_queue,
arena );
cugar::Timer timer;
timer.start();
float path_rt_time = 0.0f;
float shadow_rt_time = 0.0f;
float path_shade_time = 0.0f;
float shadow_shade_time = 0.0f;
// fetch a view of the renderer
RendererView renderer_view = renderer.view(instance);
// setup the samples for this frame
m_sequence.set_instance(instance);
PathTracingContext context;
context.options = m_options;
context.in_bounce = 0;
context.in_queue = queue1;
context.scatter_queue = queue2;
context.shadow_queue = shadow_queue;
context.sequence = m_sequence.view();
generate_primary_rays(context, renderer_view);
CUDA_CHECK(cugar::cuda::sync_and_check_error("generate primary rays"));
for (context.in_bounce = 0;
context.in_bounce < context.options.max_path_length;
context.in_bounce++)
{
uint32 in_queue_size;
// fetch the amount of tasks in the queue
hipMemcpy(&in_queue_size, context.in_queue.size, sizeof(uint32), hipMemcpyDeviceToHost);
// check whether there's still any work left
if (in_queue_size == 0)
break;
// trace the rays generated at the previous bounce
//
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> trace_timer(&path_rt_time));
optix::prime::Query query = renderer.m_model->createQuery(RTP_QUERY_TYPE_CLOSEST);
query->setRays(in_queue_size, Ray::format, RTP_BUFFER_TYPE_CUDA_LINEAR, context.in_queue.rays);
query->setHits(in_queue_size, Hit::format, RTP_BUFFER_TYPE_CUDA_LINEAR, context.in_queue.hits);
query->execute(0);
CUDA_CHECK(cugar::cuda::sync_and_check_error("trace"));
}
// reset the output queue counters
hipMemset(context.shadow_queue.size, 0x00, sizeof(uint32));
hipMemset(context.scatter_queue.size, 0x00, sizeof(uint32));
CUDA_CHECK(cugar::cuda::check_error("memset"));
// perform lighting at this bounce
//
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> shade_timer(&path_shade_time));
shade_hits(in_queue_size, context, renderer_view);
CUDA_CHECK(cugar::cuda::sync_and_check_error("shade hits"));
}
// trace & accumulate occlusion queries
{
uint32 shadow_queue_size;
hipMemcpy(&shadow_queue_size, context.shadow_queue.size, sizeof(uint32), hipMemcpyDeviceToHost);
// trace the rays
//
if (shadow_queue_size)
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> trace_timer(&shadow_rt_time));
optix::prime::Query query = renderer.m_model->createQuery(RTP_QUERY_TYPE_CLOSEST);
query->setRays(shadow_queue_size, Ray::format, RTP_BUFFER_TYPE_CUDA_LINEAR, shadow_queue.rays);
query->setHits(shadow_queue_size, Hit::format, RTP_BUFFER_TYPE_CUDA_LINEAR, shadow_queue.hits);
query->execute(0);
CUDA_CHECK(cugar::cuda::sync_and_check_error("trace occlusion"));
}
// shade the results
//
if (shadow_queue_size)
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> shade_timer(&shadow_shade_time));
solve_occlusion(shadow_queue_size, context, renderer_view);
CUDA_CHECK(cugar::cuda::sync_and_check_error("solve occlusion"));
}
}
std::swap(context.in_queue, context.scatter_queue);
}
timer.stop();
const float time = timer.seconds();
// clear the global timer at instance zero
if (instance == 0)
m_time = time;
else
m_time += time;
fprintf(stderr, "\r %.1fs (%.1fms = rt[%.1fms + %.1fms] + shade[%.1fms + %.1fms]) ",
m_time,
time * 1000.0f,
path_rt_time * 1000.0f,
shadow_rt_time * 1000.0f,
path_shade_time * 1000.0f,
shadow_shade_time * 1000.0f);
renderer.update_variances( instance );
}
| f2edd161a3139c2ce7a4004a98ce165539574e63.cu | /*
* Fermat
*
* Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <pathtracer.h>
#include <renderer.h>
#include <mesh/MeshStorage.h>
#include <cugar/basic/timer.h>
#include <cugar/basic/primitives.h>
#include <cugar/basic/cuda/warp_atomics.h>
#include <cugar/basic/memory_arena.h>
#include <bsdf.h>
#include <edf.h>
#include <mis_utils.h>
#include <bpt_utils.h>
#include <eaw.h>
#include <vector>
#define SHIFT_RES 256u
#define SHADE_HITS_BLOCKSIZE 64
#define SHADE_HITS_CTA_BLOCKS 16
#define DEBUG_PIXEL (187 + 826 * 1600)
#define MIS_HEURISTIC POWER_HEURISTIC
namespace {
union PixelInfo
{
FERMAT_HOST_DEVICE PixelInfo() {}
FERMAT_HOST_DEVICE PixelInfo(const uint32 _packed) : packed(_packed) {}
FERMAT_HOST_DEVICE PixelInfo(const uint32 _pixel, const uint32 _comp) : pixel(_pixel), comp(_comp) {}
uint32 packed;
struct
{
uint32 pixel : 28;
uint32 comp : 4;
};
};
struct PTRayQueue
{
Ray* rays;
Hit* hits;
float4* weights; // path weight
float4* weights_d; // diffuse path weight
float4* weights_g; // glossy path weight
uint32* pixels;
uint32* size;
FERMAT_DEVICE
void warp_append(const PixelInfo pixel, const Ray& ray, const float4 weight)
{
cugar::cuda::warp_static_atomic atomic_adder(size);
uint32 slot;
atomic_adder.add<1>(true, &slot);
//slot = atomicAdd(size, 1u);
rays[slot] = ray;
weights[slot] = weight;
pixels[slot] = pixel.packed;
}
FERMAT_DEVICE
void warp_append(const PixelInfo pixel, const Ray& ray, const float4 weight, const float4 weight_d, const float4 weight_g)
{
cugar::cuda::warp_static_atomic atomic_adder(size);
uint32 slot;
atomic_adder.add<1>(true, &slot);
//slot = atomicAdd(size, 1u);
rays[slot] = ray;
weights[slot] = weight;
weights_d[slot] = weight_d;
weights_g[slot] = weight_g;
pixels[slot] = pixel.packed;
}
};
struct PathTracingContext
{
PTOptions options;
uint32 in_bounce;
TiledSequenceView sequence;
PTRayQueue in_queue;
PTRayQueue shadow_queue;
PTRayQueue scatter_queue;
};
//------------------------------------------------------------------------------
__global__ void generate_primary_rays_kernel(PathTracingContext context, RendererView renderer, cugar::Vector3f U, cugar::Vector3f V, cugar::Vector3f W)
{
int pixel_x = threadIdx.x + blockIdx.x*blockDim.x;
int pixel_y = threadIdx.y + blockIdx.y*blockDim.y;
if (pixel_x >= renderer.res_x || pixel_y >= renderer.res_y)
return;
int idx = pixel_x + pixel_y*renderer.res_x;
// use an optimized sampling pattern to rotate a Halton sequence
const cugar::Vector2f uv(
context.sequence.sample_2d(pixel_x, pixel_y, 0),
context.sequence.sample_2d(pixel_x, pixel_y, 1));
const float2 d = make_float2(
(pixel_x + uv.x) / float(renderer.res_x),
(pixel_y + uv.y) / float(renderer.res_y)) * 2.f - 1.f;
// write the pixel index
context.in_queue.pixels[idx] = idx;
float3 ray_origin = renderer.camera.eye;
float3 ray_direction = d.x*U + d.y*V + W;
reinterpret_cast<float4*>(context.in_queue.rays)[2 * idx + 0] = make_float4(ray_origin.x, ray_origin.y, ray_origin.z, 0.0f); // origin, tmin
reinterpret_cast<float4*>(context.in_queue.rays)[2 * idx + 1] = make_float4(ray_direction.x, ray_direction.y, ray_direction.z, 1e34f); // dir, tmax
// write the filter weight
context.in_queue.weights[idx] = cugar::Vector4f(1.0f, 1.0f, 1.0f, 1.0f);
if (idx == 0)
*context.in_queue.size = renderer.res_x * renderer.res_y;
}
//------------------------------------------------------------------------------
void generate_primary_rays(PathTracingContext context, const RendererView renderer)
{
cugar::Vector3f U, V, W;
camera_frame(renderer.camera, renderer.aspect, U, V, W);
dim3 blockSize(32, 16);
dim3 gridSize(cugar::divide_ri(renderer.res_x, blockSize.x), cugar::divide_ri(renderer.res_y, blockSize.y));
generate_primary_rays_kernel << < gridSize, blockSize >> > (context, renderer, U, V, W);
}
//------------------------------------------------------------------------------
template <uint32 NUM_WARPS>
__global__
__launch_bounds__(SHADE_HITS_BLOCKSIZE, SHADE_HITS_CTA_BLOCKS)
void shade_hits_kernel(const uint32 in_queue_size, PathTracingContext context, RendererView renderer, const float frame_weight, const bool do_nee, const bool do_accumulate_emissive, const bool do_scatter)
{
const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id < in_queue_size) // *context.in_queue.size
{
const PixelInfo pixel_info = context.in_queue.pixels[thread_id];
const Ray ray = context.in_queue.rays[thread_id];
const Hit hit = context.in_queue.hits[thread_id];
const cugar::Vector4f w = context.in_queue.weights[thread_id];
const float p_prev = w.w;
const uint32 pixel_x = pixel_info.pixel % renderer.res_x;
const uint32 pixel_y = pixel_info.pixel / renderer.res_x;
// initialize our shifted sampling sequence
float samples[6];
for (uint32 i = 0; i < 6; ++i)
samples[i] = context.sequence.sample_2d(pixel_x, pixel_y, (context.in_bounce + 1) * 6 + i);
if (hit.t > 0.0f && hit.triId >= 0)
{
EyeVertex ev;
ev.setup(ray, hit, w.xyz(), cugar::Vector4f(0.0f), context.in_bounce, renderer);
// write out gbuffer information
if (context.in_bounce == 0)
{
renderer.fb.gbuffer.geo(pixel_info.pixel) = GBufferView::pack_geometry(ev.geom.position, ev.geom.normal_s);
renderer.fb.gbuffer.uv(pixel_info.pixel) = make_float4(hit.u, hit.v, ev.geom.texture_coords.x, ev.geom.texture_coords.y);
renderer.fb.gbuffer.tri(pixel_info.pixel) = hit.triId;
}
cugar::Vector3f in = -cugar::normalize(cugar::Vector3f(ray.dir));
// perform next-event estimation to compute direct lighting
if (do_nee)
{
// fetch the sampling dimensions
const float z[3] = { samples[0], samples[1], samples[2] }; // use dimensions 0,1,2
VertexGeometryId light_vertex;
VertexGeometry light_vertex_geom;
float light_pdf;
Edf light_edf;
// sample the light source surface
renderer.mesh_vpls.sample(z, &light_vertex.prim_id, &light_vertex.uv, &light_vertex_geom, &light_pdf, &light_edf);
//renderer.mesh_light.sample(z, &light_vertex.prim_id, &light_vertex.uv, &light_vertex_geom, &light_pdf, &light_edf);
// join the light sample with the current vertex
cugar::Vector3f out = (light_vertex_geom.position - ev.geom.position);
const float d2 = fmaxf(1.0e-8f, cugar::square_length(out));
// normalize the outgoing direction
out *= rsqrtf(d2);
cugar::Vector3f f_s_comp[Bsdf::kNumComponents];
float p_s_comp[Bsdf::kNumComponents];
ev.bsdf.f_and_p(ev.geom, ev.in, out, f_s_comp, p_s_comp, cugar::kProjectedSolidAngle);
cugar::Vector3f f_s(0.0f);
float p_s(0.0f);
for (uint32 i = 0; i < Bsdf::kNumComponents; ++i)
{
f_s += f_s_comp[i];
p_s += p_s_comp[i];
}
// evaluate the light's EDF and the surface BSDF
const cugar::Vector3f f_L = light_edf.f(light_vertex_geom, light_vertex_geom.position, -out) / light_pdf;
// evaluate the geometric term
const float G = fabsf(cugar::dot(out, ev.geom.normal_s) * cugar::dot(out, light_vertex_geom.normal_s)) / d2;
// TODO: perform MIS with the possibility of directly hitting the light source
const float p1 = light_pdf;
const float p2 = p_s * G;
const float mis_w =
(context.in_bounce == 0 && context.options.direct_lighting_bsdf) ||
(context.in_bounce > 0 && context.options.indirect_lighting_bsdf) ? mis_heuristic<MIS_HEURISTIC>(p1, p2) : 1.0f;
// calculate the cumulative sample weight, equal to f_L * f_s * G / p
const cugar::Vector3f out_w = w.xyz() * f_L * f_s * G * mis_w;
const cugar::Vector3f out_w_d = (context.in_bounce == 0 ? f_s_comp[Bsdf::kDiffuseReflectionIndex] + f_s_comp[Bsdf::kDiffuseTransmissionIndex] : f_s) * w.xyz() * f_L * G * mis_w;
const cugar::Vector3f out_w_g = (context.in_bounce == 0 ? f_s_comp[Bsdf::kGlossyReflectionIndex] + f_s_comp[Bsdf::kGlossyTransmissionIndex] : f_s) * w.xyz() * f_L * G * mis_w;
if (cugar::max_comp(out_w) > 0.0f && cugar::is_finite(out_w))
{
// enqueue the output ray
Ray out_ray;
out_ray.origin = ev.geom.position - ray.dir * 1.0e-4f; // shift back in space along the viewing direction
out_ray.dir = (light_vertex_geom.position - out_ray.origin); //out;
out_ray.tmin = 0.0f;
out_ray.tmax = 0.9999f; //d * 0.9999f;
const PixelInfo out_pixel = pixel_info;
context.shadow_queue.warp_append(
out_pixel,
out_ray,
cugar::Vector4f(out_w, 0.0f),
cugar::Vector4f(out_w_d, 0.0f),
cugar::Vector4f(out_w_g, 0.0f) );
}
}
// accumulate the emissive component along the incoming direction
if (do_accumulate_emissive)
{
VertexGeometry light_vertex_geom = ev.geom;
float light_pdf;
Edf light_edf;
renderer.mesh_vpls.map(hit.triId, cugar::Vector2f(hit.u, hit.v), light_vertex_geom, &light_pdf, &light_edf);
//renderer.mesh_light.map(hit.triId, cugar::Vector2f(hit.u, hit.v), light_vertex_geom, &light_pdf, &light_edf);
// evaluate the edf's output along the incoming direction
const cugar::Vector3f f_L = light_edf.f(light_vertex_geom, light_vertex_geom.position, ev.in);
const float d2 = fmaxf(1.0e-10f, hit.t * hit.t);
// compute the MIS weight with next event estimation at the previous vertex
				const float  G_partial = fabsf(cugar::dot(ev.in, light_vertex_geom.normal_s)) / d2;	// NOTE: G_partial doesn't include the dot product between 'in' and the normal at the previous vertex
const float p1 = G_partial * p_prev; // NOTE: p_prev is the solid angle probability of sampling the BSDF at the previous vertex, i.e. p_proj * dot(in,normal)
const float p2 = light_pdf;
const float mis_w =
(context.in_bounce == 1 && context.options.direct_lighting_nee) ||
(context.in_bounce > 1 && context.options.indirect_lighting_nee) ? mis_heuristic<MIS_HEURISTIC>(p1, p2) : 1.0f;
				// compute the weighted contribution
const cugar::Vector3f out_w = w.xyz() * f_L * mis_w;
// and accumulate the weighted contribution
if (cugar::max_comp(out_w) > 0.0f && cugar::is_finite(out_w))
{
// accumulate to the image
add_in<false>(renderer.fb(FBufferDesc::COMPOSITED_C), pixel_info.pixel, out_w, frame_weight);
// accumulate the per-component value to the proper output channel
if (context.in_bounce == 0)
add_in<false>(renderer.fb(FBufferDesc::DIRECT_C), pixel_info.pixel, out_w, frame_weight);
else
{
if (pixel_info.comp & Bsdf::kDiffuseMask) add_in<true>(renderer.fb(FBufferDesc::DIFFUSE_C), pixel_info.pixel, out_w, frame_weight);
if (pixel_info.comp & Bsdf::kGlossyMask) add_in<true>(renderer.fb(FBufferDesc::SPECULAR_C), pixel_info.pixel, out_w, frame_weight);
}
}
}
// trace a bounce ray
if (do_scatter)
{
// fetch the sampling dimensions
const float z[3] = { samples[3], samples[4], samples[5] }; // use dimensions 3,4,5
// sample a scattering event
cugar::Vector3f out(0.0f);
cugar::Vector3f g(0.0f);
float p(0.0f);
float p_proj(0.0f);
Bsdf::ComponentType out_comp(Bsdf::kAbsorption);
scatter(ev, z, out_comp, out, p, p_proj, g, true, false);
cugar::Vector3f out_w = g * w.xyz();
if (context.in_bounce == 0)
{
renderer.fb(FBufferDesc::DIFFUSE_A, pixel_info.pixel) += cugar::Vector4f(ev.material.diffuse) * frame_weight;
renderer.fb(FBufferDesc::SPECULAR_A, pixel_info.pixel) += (cugar::Vector4f(ev.material.specular) + cugar::Vector4f(1.0f))*0.5f * frame_weight;
}
if (cugar::max_comp(out_w) > 0.0f && cugar::is_finite(out_w))
{
// enqueue the output ray
Ray out_ray;
out_ray.origin = ev.geom.position;
out_ray.dir = out;
out_ray.tmin = 1.0e-4f;
out_ray.tmax = 1.0e8f;
const float out_p = p;
const PixelInfo out_pixel = context.in_bounce ?
pixel_info : // if this sample is a secondary bounce, use the previously selected channel
PixelInfo(pixel_info.pixel, out_comp); // otherwise (i.e. this is the first bounce) choose the output channel for the rest of the path
context.scatter_queue.warp_append(
out_pixel,
out_ray,
cugar::Vector4f(out_w, out_p) );
}
}
}
else
{
// hit the environment - perform sky lighting
}
}
}
void shade_hits(const uint32 in_queue_size, PathTracingContext context, RendererView renderer)
{
const uint32 blockSize(SHADE_HITS_BLOCKSIZE);
const dim3 gridSize(cugar::divide_ri(in_queue_size, blockSize));
// decide whether to perform next-event estimation
const bool do_nee =
((context.in_bounce + 2 <= context.options.max_path_length) &&
((context.in_bounce == 0 && context.options.direct_lighting_nee) ||
(context.in_bounce > 0 && context.options.indirect_lighting_nee)));
// decide whether to evaluate and accumulate emissive surfaces
const bool do_accumulate_emissive =
((context.in_bounce == 0 && context.options.visible_lights) ||
(context.in_bounce == 1 && context.options.direct_lighting_bsdf) ||
(context.in_bounce > 1 && context.options.indirect_lighting_bsdf));
// compute the number of path vertices we want to generate from the eye
const uint32 max_path_vertices = context.options.max_path_length +
((context.options.max_path_length == 2 && context.options.direct_lighting_bsdf) ||
(context.options.max_path_length > 2 && context.options.indirect_lighting_bsdf) ? 1 : 0);
// decide whether to perform scattering
const bool do_scatter = (context.in_bounce + 2 < max_path_vertices);
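	// For example, with max_path_length == 2 and direct_lighting_bsdf enabled,
	// max_path_vertices == 3, so scattering only happens at in_bounce == 0.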
shade_hits_kernel<blockSize / 32> << < gridSize, blockSize >> > (in_queue_size, context, renderer, 1.0f / float(renderer.instance + 1), do_nee, do_accumulate_emissive, do_scatter);
}
__global__
void solve_occlusion_kernel(const uint32 in_queue_size, PathTracingContext context, RendererView renderer, const float frame_weight)
{
const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id < in_queue_size) // *context.shadow_queue.size
{
const PixelInfo pixel_info = context.shadow_queue.pixels[thread_id];
const Hit hit = context.shadow_queue.hits[thread_id];
const cugar::Vector4f w = context.shadow_queue.weights[thread_id];
const cugar::Vector4f w_d = context.shadow_queue.weights_d[thread_id];
const cugar::Vector4f w_g = context.shadow_queue.weights_g[thread_id];
// TODO: break this up in separate diffuse and specular components
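		// a negative hit.t means the shadow ray found no occluder, i.e. the light sample
		// is visible from the shading point, so its contribution can be accumulated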
if (hit.t < 0.0f)
{
add_in<false>( renderer.fb(FBufferDesc::COMPOSITED_C), pixel_info.pixel, w.xyz(), frame_weight );
if (context.in_bounce == 0)
{
// accumulate the per-component values to the respective output channels
add_in<true>( renderer.fb(FBufferDesc::DIFFUSE_C), pixel_info.pixel, w_d.xyz(), frame_weight );
add_in<true>( renderer.fb(FBufferDesc::SPECULAR_C), pixel_info.pixel, w_g.xyz(), frame_weight );
}
else
{
// accumulate the per-component value to the proper output channel
if (pixel_info.comp & Bsdf::kDiffuseMask) add_in<true>( renderer.fb(FBufferDesc::DIFFUSE_C), pixel_info.pixel, w_d.xyz(), frame_weight );
				if (pixel_info.comp & Bsdf::kGlossyMask)	add_in<true>( renderer.fb(FBufferDesc::SPECULAR_C), pixel_info.pixel, w_g.xyz(), frame_weight );
}
}
}
}
void solve_occlusion(const uint32 in_queue_size, PathTracingContext context, RendererView renderer)
{
const uint32 blockSize(128);
const dim3 gridSize(cugar::divide_ri(in_queue_size, blockSize));
solve_occlusion_kernel << < gridSize, blockSize >> > (in_queue_size, context, renderer, 1.0f / float(renderer.instance + 1) );
}
void alloc_queues(
PTOptions options,
const uint32 n_pixels,
PTRayQueue& input_queue,
PTRayQueue& scatter_queue,
PTRayQueue& shadow_queue,
cugar::memory_arena& arena)
{
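	// note: only the shadow queue carries separate diffuse/glossy weight channels
	// (weights_d / weights_g); the path queues keep a single combined weight, so
	// those fields are left NULL below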
input_queue.rays = arena.alloc<Ray>(n_pixels);
input_queue.hits = arena.alloc<Hit>(n_pixels);
input_queue.weights = arena.alloc<float4>(n_pixels);
input_queue.weights_d = NULL;
input_queue.weights_g = NULL;
input_queue.pixels = arena.alloc<uint32>(n_pixels);
input_queue.size = arena.alloc<uint32>(1);
scatter_queue.rays = arena.alloc<Ray>(n_pixels);
scatter_queue.hits = arena.alloc<Hit>(n_pixels);
scatter_queue.weights = arena.alloc<float4>(n_pixels);
scatter_queue.weights_d = NULL;
scatter_queue.weights_g = NULL;
scatter_queue.pixels = arena.alloc<uint32>(n_pixels);
scatter_queue.size = arena.alloc<uint32>(1);
shadow_queue.rays = arena.alloc<Ray>(n_pixels);
shadow_queue.hits = arena.alloc<Hit>(n_pixels);
shadow_queue.weights = arena.alloc<float4>(n_pixels);
shadow_queue.weights_d = arena.alloc<float4>(n_pixels);
shadow_queue.weights_g = arena.alloc<float4>(n_pixels);
shadow_queue.pixels = arena.alloc<uint32>(n_pixels);
shadow_queue.size = arena.alloc<uint32>(1);
}
} // anonymous namespace
PathTracer::PathTracer() :
m_generator(32, cugar::LFSRGeneratorMatrix::GOOD_PROJECTIONS),
m_random(&m_generator, 1u, 1351u)
{
}
void PathTracer::init(int argc, char** argv, Renderer& renderer)
{
const uint2 res = renderer.res();
const uint32 n_pixels = res.x * res.y;
// parse the options
m_options.parse(argc, argv);
// pre-alloc queue storage
{
// determine how much storage we will need
cugar::memory_arena arena;
PTRayQueue input_queue;
PTRayQueue scatter_queue;
PTRayQueue shadow_queue;
alloc_queues(
m_options,
n_pixels,
input_queue,
scatter_queue,
shadow_queue,
arena );
fprintf(stderr, " allocating queue storage: %.1f MB\n", float(arena.size) / (1024*1024));
m_memory_pool.alloc(arena.size);
}
// build the set of shifts
const uint32 n_dimensions = 6 * (m_options.max_path_length + 1);
fprintf(stderr, " initializing sampler: %u dimensions\n", n_dimensions);
m_sequence.setup(n_dimensions, SHIFT_RES);
const uint32 n_light_paths = n_pixels;
	fprintf(stderr, "  creating mesh lights... started\n");
// initialize the mesh lights sampler
renderer.m_mesh_lights.init( n_light_paths, renderer.m_mesh.view(), renderer.m_mesh_d.view(), renderer.m_texture_views_h.ptr(), renderer.m_texture_views_d.ptr() );
	fprintf(stderr, "  creating mesh lights... done\n");
}
void PathTracer::render(const uint32 instance, Renderer& renderer)
{
// pre-multiply the previous frame for blending
renderer.rescale_frame( instance );
//fprintf(stderr, "render started (%u)\n", instance);
const uint2 res = renderer.res();
const uint32 n_pixels = res.x * res.y;
cugar::memory_arena arena( m_memory_pool.ptr() );
PTRayQueue queue1;
PTRayQueue queue2;
PTRayQueue shadow_queue;
alloc_queues(
m_options,
n_pixels,
queue1,
queue2,
shadow_queue,
arena );
cugar::Timer timer;
timer.start();
float path_rt_time = 0.0f;
float shadow_rt_time = 0.0f;
float path_shade_time = 0.0f;
float shadow_shade_time = 0.0f;
// fetch a view of the renderer
RendererView renderer_view = renderer.view(instance);
// setup the samples for this frame
m_sequence.set_instance(instance);
PathTracingContext context;
context.options = m_options;
context.in_bounce = 0;
context.in_queue = queue1;
context.scatter_queue = queue2;
context.shadow_queue = shadow_queue;
context.sequence = m_sequence.view();
generate_primary_rays(context, renderer_view);
CUDA_CHECK(cugar::cuda::sync_and_check_error("generate primary rays"));
for (context.in_bounce = 0;
context.in_bounce < context.options.max_path_length;
context.in_bounce++)
{
uint32 in_queue_size;
// fetch the amount of tasks in the queue
cudaMemcpy(&in_queue_size, context.in_queue.size, sizeof(uint32), cudaMemcpyDeviceToHost);
// check whether there's still any work left
if (in_queue_size == 0)
break;
// trace the rays generated at the previous bounce
//
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> trace_timer(&path_rt_time));
optix::prime::Query query = renderer.m_model->createQuery(RTP_QUERY_TYPE_CLOSEST);
query->setRays(in_queue_size, Ray::format, RTP_BUFFER_TYPE_CUDA_LINEAR, context.in_queue.rays);
query->setHits(in_queue_size, Hit::format, RTP_BUFFER_TYPE_CUDA_LINEAR, context.in_queue.hits);
query->execute(0);
CUDA_CHECK(cugar::cuda::sync_and_check_error("trace"));
}
// reset the output queue counters
cudaMemset(context.shadow_queue.size, 0x00, sizeof(uint32));
cudaMemset(context.scatter_queue.size, 0x00, sizeof(uint32));
CUDA_CHECK(cugar::cuda::check_error("memset"));
// perform lighting at this bounce
//
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> shade_timer(&path_shade_time));
shade_hits(in_queue_size, context, renderer_view);
CUDA_CHECK(cugar::cuda::sync_and_check_error("shade hits"));
}
// trace & accumulate occlusion queries
{
uint32 shadow_queue_size;
cudaMemcpy(&shadow_queue_size, context.shadow_queue.size, sizeof(uint32), cudaMemcpyDeviceToHost);
// trace the rays
//
if (shadow_queue_size)
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> trace_timer(&shadow_rt_time));
optix::prime::Query query = renderer.m_model->createQuery(RTP_QUERY_TYPE_CLOSEST);
query->setRays(shadow_queue_size, Ray::format, RTP_BUFFER_TYPE_CUDA_LINEAR, shadow_queue.rays);
query->setHits(shadow_queue_size, Hit::format, RTP_BUFFER_TYPE_CUDA_LINEAR, shadow_queue.hits);
query->execute(0);
CUDA_CHECK(cugar::cuda::sync_and_check_error("trace occlusion"));
}
// shade the results
//
if (shadow_queue_size)
{
FERMAT_CUDA_TIME(cugar::cuda::ScopedTimer<float> shade_timer(&shadow_shade_time));
solve_occlusion(shadow_queue_size, context, renderer_view);
CUDA_CHECK(cugar::cuda::sync_and_check_error("solve occlusion"));
}
}
std::swap(context.in_queue, context.scatter_queue);
}
timer.stop();
const float time = timer.seconds();
// clear the global timer at instance zero
if (instance == 0)
m_time = time;
else
m_time += time;
fprintf(stderr, "\r %.1fs (%.1fms = rt[%.1fms + %.1fms] + shade[%.1fms + %.1fms]) ",
m_time,
time * 1000.0f,
path_rt_time * 1000.0f,
shadow_rt_time * 1000.0f,
path_shade_time * 1000.0f,
shadow_shade_time * 1000.0f);
renderer.update_variances( instance );
}
|
86e4695530c738d14ebc77aa468e058703ae4beb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlaqps2_gpu.cu normal z -> c, Fri Jul 18 17:34:12 2014
*/
#include "common_magma.h"
#define PRECISION_c
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv,
magmaFloatComplex *c, magmaFloatComplex *dwork,
magmaFloatComplex *tau);
/* --------------------------------------------------------------------------- */
/**
Purpose
-------
CLAQPS computes a step of QR factorization with column pivoting
of a complex M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
            The number of columns of the matrix A.  N >= 0.
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
NB INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
A COMPLEX array, dimension (LDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
            accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
tau COMPLEX array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
VN1 REAL array, dimension (N)
The vector with the partial column norms.
@param[in,out]
VN2 REAL array, dimension (N)
The vector with the exact column norms.
@param[in,out]
AUXV COMPLEX array, dimension (NB)
            Auxiliary vector.
@param[in,out]
F COMPLEX array, dimension (LDF,NB)
Matrix F' = L*Y'*A.
@param[in]
ldf INTEGER
The leading dimension of the array F. LDF >= max(1,N).
@ingroup magma_cgeqp3_aux
********************************************************************/
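/*  Illustrative call sketch (the variable names below are hypothetical placeholders,
    not identifiers from MAGMA itself):

        magma_int_t kb;
        magma_claqps2_gpu( m, n, offset, nb, &kb,
                           dA, ldda, jpvt, dtau,
                           dvn1, dvn2, dauxv, dF, lddf );

    The array arguments are sized as documented above, and KB returns the number
    of columns actually factorized.                                              */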
extern "C" magma_int_t
magma_claqps2_gpu(magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaFloatComplex *A, magma_int_t lda,
magma_int_t *jpvt, magmaFloatComplex *tau,
float *vn1, float *vn2,
magmaFloatComplex *auxv,
magmaFloatComplex *F, magma_int_t ldf)
{
#define A(i, j) (A + (i) + (j)*(lda ))
#define F(i, j) (F + (i) + (j)*(ldf ))
magmaFloatComplex c_zero = MAGMA_C_MAKE( 0.,0.);
magmaFloatComplex c_one = MAGMA_C_MAKE( 1.,0.);
magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.);
magma_int_t ione = 1;
magma_int_t i__1, i__2;
magma_int_t k, rk;
magmaFloatComplex tauk;
magma_int_t pvt, itemp;
float tol3z;
magmaFloatComplex *dAkk = auxv;
auxv+=nb;
float lsticc, *lsticcs;
magma_smalloc( &lsticcs, 1+256*(n+255)/256 );
tol3z = magma_ssqrt( lapackf77_slamch("Epsilon"));
lsticc = 0;
k = 0;
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione );
if (pvt != k) {
magmablas_cswap( k, F(pvt,0), ldf, F(k,0), ldf);
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
#if (defined(PRECISION_d) || defined(PRECISION_z))
//magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
//magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#else
//magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
//magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#endif
magmablas_cswap( m, A(0,pvt), ione, A(0, k), ione );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
/*#if (defined(PRECISION_c) || defined(PRECISION_z))
for (j = 0; j < k; ++j){
*F(k,j) = MAGMA_C_CNJG( *F(k,j) );
}
#endif*/
magmablas_cgemv( MagmaNoTrans, m-rk, k,
c_neg_one, A(rk, 0), lda,
F(k, 0), ldf,
c_one, A(rk, k), ione );
/*#if (defined(PRECISION_c) || defined(PRECISION_z))
for (j = 0; j < k; ++j) {
*F(k,j) = MAGMA_C_CNJG( *F(k,j) );
}
#endif*/
}
/* Generate elementary reflector H(k). */
magma_clarfg_gpu(m-rk, A(rk, k), A(rk + 1, k), &tau[k], &vn1[k], &dAkk[k]);
//Akk = *A(rk, k);
//*A(rk, k) = c_one;
//magma_cgetvector( 1, A(rk, k), 1, &Akk, 1 );
// this needs to be done outside clarfg to avoid the race condition.
magma_csetvector( 1, &c_one, 1, A(rk, k), 1 );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) magma_cgetvector( 1, &tau[k], 1, &tauk, 1 );
if (k < n-1) {
magmablas_cgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, A( rk, k+1 ), lda,
A( rk, k ), 1,
c_zero, F( k+1, k ), 1 );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_C_NEGATE( tauk );
magmablas_cgemv( MagmaConjTrans, m-rk, k,
z__1, A(rk, 0), lda,
A(rk, k), ione,
c_zero, auxv, ione );*/
hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m-rk, A(rk, 0), lda,
A(rk, k), auxv, tau+k);
        /* I think we only need strictly lower-triangular part */
magmablas_cgemv( MagmaNoTrans, n-k-1, k,
c_one, F(k+1,0), ldf,
auxv, ione,
c_one, F(k+1,k), ione );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A'v with original A, so no right-looking */
magma_cgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, A(rk, 0 ), lda,
F(k+1,0 ), ldf,
c_one, A(rk, k+1), lda );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1){
magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1],
&vn2[k+1], A(rk,k+1), lda, lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#else
magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#endif
}
//*A(rk, k) = Akk;
//magma_csetvector( 1, &Akk, 1, A(rk, k), 1 );
//magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1);
++k;
}
// restore the diagonals
magma_ccopymatrix( 1, k, dAkk, 1, A(offset, 0), lda+1 );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, A(rk+1, 0 ), lda,
F(*kb, 0 ), ldf,
c_one, A(rk+1, *kb), lda );
}
/* Recomputation of difficult columns. */
if( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_scnrm2_check(m-rk-1, n-*kb, A(rk+1,*kb), lda,
&vn1[*kb], lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_dcopymatrix( n-*kb, 1, &vn1[*kb], *kb, &vn2[*kb], *kb);
#else
magma_scopymatrix( n-*kb, 1, &vn1[*kb], *kb, &vn2[*kb], *kb);
#endif
}
magma_free(lsticcs);
return MAGMA_SUCCESS;
} /* magma_claqps */
| 86e4695530c738d14ebc77aa468e058703ae4beb.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zlaqps2_gpu.cu normal z -> c, Fri Jul 18 17:34:12 2014
*/
#include "common_magma.h"
#define PRECISION_c
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv,
magmaFloatComplex *c, magmaFloatComplex *dwork,
magmaFloatComplex *tau);
/* --------------------------------------------------------------------------- */
/**
Purpose
-------
CLAQPS computes a step of QR factorization with column pivoting
of a complex M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
            The number of columns of the matrix A.  N >= 0.
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
NB INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
A COMPLEX array, dimension (LDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
            accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
tau COMPLEX array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
VN1 REAL array, dimension (N)
The vector with the partial column norms.
@param[in,out]
VN2 REAL array, dimension (N)
The vector with the exact column norms.
@param[in,out]
AUXV COMPLEX array, dimension (NB)
            Auxiliary vector.
@param[in,out]
F COMPLEX array, dimension (LDF,NB)
Matrix F' = L*Y'*A.
@param[in]
ldf INTEGER
The leading dimension of the array F. LDF >= max(1,N).
@ingroup magma_cgeqp3_aux
********************************************************************/
extern "C" magma_int_t
magma_claqps2_gpu(magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaFloatComplex *A, magma_int_t lda,
magma_int_t *jpvt, magmaFloatComplex *tau,
float *vn1, float *vn2,
magmaFloatComplex *auxv,
magmaFloatComplex *F, magma_int_t ldf)
{
#define A(i, j) (A + (i) + (j)*(lda ))
#define F(i, j) (F + (i) + (j)*(ldf ))
magmaFloatComplex c_zero = MAGMA_C_MAKE( 0.,0.);
magmaFloatComplex c_one = MAGMA_C_MAKE( 1.,0.);
magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.);
magma_int_t ione = 1;
magma_int_t i__1, i__2;
magma_int_t k, rk;
magmaFloatComplex tauk;
magma_int_t pvt, itemp;
float tol3z;
magmaFloatComplex *dAkk = auxv;
auxv+=nb;
float lsticc, *lsticcs;
magma_smalloc( &lsticcs, 1+256*(n+255)/256 );
tol3z = magma_ssqrt( lapackf77_slamch("Epsilon"));
lsticc = 0;
k = 0;
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione );
if (pvt != k) {
magmablas_cswap( k, F(pvt,0), ldf, F(k,0), ldf);
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
#if (defined(PRECISION_d) || defined(PRECISION_z))
//magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
//magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#else
//magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
//magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#endif
magmablas_cswap( m, A(0,pvt), ione, A(0, k), ione );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
/*#if (defined(PRECISION_c) || defined(PRECISION_z))
for (j = 0; j < k; ++j){
*F(k,j) = MAGMA_C_CNJG( *F(k,j) );
}
#endif*/
magmablas_cgemv( MagmaNoTrans, m-rk, k,
c_neg_one, A(rk, 0), lda,
F(k, 0), ldf,
c_one, A(rk, k), ione );
/*#if (defined(PRECISION_c) || defined(PRECISION_z))
for (j = 0; j < k; ++j) {
*F(k,j) = MAGMA_C_CNJG( *F(k,j) );
}
#endif*/
}
/* Generate elementary reflector H(k). */
magma_clarfg_gpu(m-rk, A(rk, k), A(rk + 1, k), &tau[k], &vn1[k], &dAkk[k]);
//Akk = *A(rk, k);
//*A(rk, k) = c_one;
//magma_cgetvector( 1, A(rk, k), 1, &Akk, 1 );
// this needs to be done outside clarfg to avoid the race condition.
magma_csetvector( 1, &c_one, 1, A(rk, k), 1 );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) magma_cgetvector( 1, &tau[k], 1, &tauk, 1 );
if (k < n-1) {
magmablas_cgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, A( rk, k+1 ), lda,
A( rk, k ), 1,
c_zero, F( k+1, k ), 1 );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_C_NEGATE( tauk );
magmablas_cgemv( MagmaConjTrans, m-rk, k,
z__1, A(rk, 0), lda,
A(rk, k), ione,
c_zero, auxv, ione );*/
magma_cgemv_kernel3<<< k, BLOCK_SIZE, 0, magma_stream >>>(m-rk, A(rk, 0), lda,
A(rk, k), auxv, tau+k);
        /* I think we only need strictly lower-triangular part */
magmablas_cgemv( MagmaNoTrans, n-k-1, k,
c_one, F(k+1,0), ldf,
auxv, ione,
c_one, F(k+1,k), ione );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A'v with original A, so no right-looking */
magma_cgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, A(rk, 0 ), lda,
F(k+1,0 ), ldf,
c_one, A(rk, k+1), lda );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1){
magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1],
&vn2[k+1], A(rk,k+1), lda, lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#else
magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#endif
}
//*A(rk, k) = Akk;
//magma_csetvector( 1, &Akk, 1, A(rk, k), 1 );
//magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1);
++k;
}
// restore the diagonals
magma_ccopymatrix( 1, k, dAkk, 1, A(offset, 0), lda+1 );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, A(rk+1, 0 ), lda,
F(*kb, 0 ), ldf,
c_one, A(rk+1, *kb), lda );
}
/* Recomputation of difficult columns. */
if( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_scnrm2_check(m-rk-1, n-*kb, A(rk+1,*kb), lda,
&vn1[*kb], lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_dcopymatrix( n-*kb, 1, &vn1[*kb], *kb, &vn2[*kb], *kb);
#else
magma_scopymatrix( n-*kb, 1, &vn1[*kb], *kb, &vn2[*kb], *kb);
#endif
}
magma_free(lsticcs);
return MAGMA_SUCCESS;
} /* magma_claqps */
|
c7a2f673e88b625f99cd3a4f7c3ff0757930de22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "modulated_deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void modulated_deformable_im2col_cuda(
const Tensor data_im, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_im_, data_offset_, data_mask_, height_im,
width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group, batch_size,
channels, deformable_group, height_col, width_col, data_col_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void modulated_deformable_col2im_cuda(
const Tensor data_col, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor grad_im) {
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels =
channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_offset_, data_mask_, channels,
height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void modulated_deformable_col2im_coord_cuda(
const Tensor data_col, const Tensor data_im, const Tensor data_offset,
const Tensor data_mask, const int batch_size, const int channels,
const int height_im, const int width_im, const int height_col,
const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int deformable_group,
Tensor grad_offset, Tensor grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h *
kernel_w * deformable_group;
const int channel_per_deformable_group =
channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_im_, data_offset_, data_mask_,
channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size,
2 * kernel_h * kernel_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_, grad_mask_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void ModulatedDeformConvForwardCUDAKernelLauncher(
Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
Tensor mask, Tensor output, Tensor columns, int kernel_h, int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, const int group,
const int deformable_group, const bool with_bias) {
at::DeviceGuard guard(input.device());
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
             kernel_h, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
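  // e.g. with height = 64, pad_h = 1, dilation_h = 1, kernel_h = 3 and stride_h = 1
  // (illustrative values): height_out = (64 + 2 - 3) / 1 + 1 = 64, i.e. the spatial
  // size is preserved; width_out follows the same arithmetic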
if (ones.ndimension() != 2 ||
ones.size(0) * ones.size(1) < height_out * width_out) {
// Resize plane and fill with ones...
ones = at::ones({height_out, width_out}, input.options());
}
// resize output
output = output.view({batch, channels_out, height_out, width_out}).zero_();
// resize temporary columns
columns =
at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
input.options());
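  // the columns buffer uses the usual im2col layout: one column of length
  // channels * kernel_h * kernel_w per output location, holding the values
  // sampled (deformably, via offset and mask) for that location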
output = output.view({output.size(0), group, output.size(1) / group,
output.size(2), output.size(3)});
for (int b = 0; b < batch; b++) {
modulated_deformable_im2col_cuda(
input[b], offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, columns);
// divide into group
weight = weight.view({group, weight.size(0) / group, weight.size(1),
weight.size(2), weight.size(3)});
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
for (int g = 0; g < group; g++) {
output[b][g] = output[b][g]
.flatten(1)
.addmm_(weight[g].flatten(1), columns[g])
.view_as(output[b][g]);
}
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
}
output = output.view({output.size(0), output.size(1) * output.size(2),
output.size(3), output.size(4)});
if (with_bias) {
output += bias.view({1, bias.size(0), 1, 1});
}
}
void ModulatedDeformConvBackwardCUDAKernelLauncher(
Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
Tensor mask, Tensor columns, Tensor grad_input, Tensor grad_weight,
Tensor grad_bias, Tensor grad_offset, Tensor grad_mask, Tensor grad_output,
int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h,
int pad_w, int dilation_h, int dilation_w, int group, int deformable_group,
const bool with_bias) {
at::DeviceGuard guard(input.device());
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
             kernel_h, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
if (ones.ndimension() != 2 ||
ones.size(0) * ones.size(1) < height_out * width_out) {
// Resize plane and fill with ones...
ones = at::ones({height_out, width_out}, input.options());
}
grad_input = grad_input.view({batch, channels, height, width});
columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out},
input.options());
grad_output =
grad_output.view({grad_output.size(0), group, grad_output.size(1) / group,
grad_output.size(2), grad_output.size(3)});
for (int b = 0; b < batch; b++) {
    // divide into group
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
weight = weight.view({group, weight.size(0) / group, weight.size(1),
weight.size(2), weight.size(3)});
for (int g = 0; g < group; g++) {
columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
grad_output[b][g].flatten(1), 0.0f, 1.0f);
}
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(
columns, input[b], offset[b], mask[b], 1, channels, height, width,
height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b],
grad_mask[b]);
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(
columns, offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, grad_input[b]);
// gradient w.r.t. weight, dWeight should accumulate across the batch and
// group
modulated_deformable_im2col_cuda(
input[b], offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, columns);
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
grad_weight = grad_weight.view({group, grad_weight.size(0) / group,
grad_weight.size(1), grad_weight.size(2),
grad_weight.size(3)});
if (with_bias)
grad_bias = grad_bias.view({group, grad_bias.size(0) / group});
for (int g = 0; g < group; g++) {
grad_weight[g] =
grad_weight[g]
.flatten(1)
.addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1))
.view_as(grad_weight[g]);
if (with_bias) {
grad_bias[g] =
grad_bias[g]
.view({-1, 1})
.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}))
.view(-1);
}
}
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1),
grad_weight.size(2), grad_weight.size(3),
grad_weight.size(4)});
if (with_bias)
grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)});
}
grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1),
grad_output.size(2), grad_output.size(3),
grad_output.size(4)});
}
| c7a2f673e88b625f99cd3a4f7c3ff0757930de22.cu | // Copyright (c) OpenMMLab. All rights reserved
#include "modulated_deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void modulated_deformable_im2col_cuda(
const Tensor data_im, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<
GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im,
width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group, batch_size,
channels, deformable_group, height_col, width_col, data_col_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void modulated_deformable_col2im_cuda(
const Tensor data_col, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor grad_im) {
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels =
channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<
GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_offset_, data_mask_, channels,
height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void modulated_deformable_col2im_coord_cuda(
const Tensor data_col, const Tensor data_im, const Tensor data_offset,
const Tensor data_mask, const int batch_size, const int channels,
const int height_im, const int width_im, const int height_col,
const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int deformable_group,
Tensor grad_offset, Tensor grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h *
kernel_w * deformable_group;
const int channel_per_deformable_group =
channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<
GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_im_, data_offset_, data_mask_,
channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size,
2 * kernel_h * kernel_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_, grad_mask_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void ModulatedDeformConvForwardCUDAKernelLauncher(
Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
Tensor mask, Tensor output, Tensor columns, int kernel_h, int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, const int group,
const int deformable_group, const bool with_bias) {
at::DeviceGuard guard(input.device());
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
             kernel_h, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
if (ones.ndimension() != 2 ||
ones.size(0) * ones.size(1) < height_out * width_out) {
// Resize plane and fill with ones...
ones = at::ones({height_out, width_out}, input.options());
}
// resize output
output = output.view({batch, channels_out, height_out, width_out}).zero_();
// resize temporary columns
columns =
at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
input.options());
output = output.view({output.size(0), group, output.size(1) / group,
output.size(2), output.size(3)});
for (int b = 0; b < batch; b++) {
modulated_deformable_im2col_cuda(
input[b], offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, columns);
// divide into group
weight = weight.view({group, weight.size(0) / group, weight.size(1),
weight.size(2), weight.size(3)});
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
for (int g = 0; g < group; g++) {
output[b][g] = output[b][g]
.flatten(1)
.addmm_(weight[g].flatten(1), columns[g])
.view_as(output[b][g]);
}
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
}
output = output.view({output.size(0), output.size(1) * output.size(2),
output.size(3), output.size(4)});
if (with_bias) {
output += bias.view({1, bias.size(0), 1, 1});
}
}
void ModulatedDeformConvBackwardCUDAKernelLauncher(
Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
Tensor mask, Tensor columns, Tensor grad_input, Tensor grad_weight,
Tensor grad_bias, Tensor grad_offset, Tensor grad_mask, Tensor grad_output,
int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h,
int pad_w, int dilation_h, int dilation_w, int group, int deformable_group,
const bool with_bias) {
at::DeviceGuard guard(input.device());
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
             kernel_h, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
if (ones.ndimension() != 2 ||
ones.size(0) * ones.size(1) < height_out * width_out) {
// Resize plane and fill with ones...
ones = at::ones({height_out, width_out}, input.options());
}
grad_input = grad_input.view({batch, channels, height, width});
columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out},
input.options());
grad_output =
grad_output.view({grad_output.size(0), group, grad_output.size(1) / group,
grad_output.size(2), grad_output.size(3)});
for (int b = 0; b < batch; b++) {
    // divide into group
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
weight = weight.view({group, weight.size(0) / group, weight.size(1),
weight.size(2), weight.size(3)});
for (int g = 0; g < group; g++) {
columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
grad_output[b][g].flatten(1), 0.0f, 1.0f);
}
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(
columns, input[b], offset[b], mask[b], 1, channels, height, width,
height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b],
grad_mask[b]);
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(
columns, offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, grad_input[b]);
// gradient w.r.t. weight, dWeight should accumulate across the batch and
// group
modulated_deformable_im2col_cuda(
input[b], offset[b], mask[b], 1, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, columns);
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
grad_weight = grad_weight.view({group, grad_weight.size(0) / group,
grad_weight.size(1), grad_weight.size(2),
grad_weight.size(3)});
if (with_bias)
grad_bias = grad_bias.view({group, grad_bias.size(0) / group});
for (int g = 0; g < group; g++) {
grad_weight[g] =
grad_weight[g]
.flatten(1)
.addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1))
.view_as(grad_weight[g]);
if (with_bias) {
grad_bias[g] =
grad_bias[g]
.view({-1, 1})
.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}))
.view(-1);
}
}
columns =
columns.view({columns.size(0) * columns.size(1), columns.size(2)});
grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1),
grad_weight.size(2), grad_weight.size(3),
grad_weight.size(4)});
if (with_bias)
grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)});
}
grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1),
grad_output.size(2), grad_output.size(3),
grad_output.size(4)});
}
|
bcd3a5c0cc6d92f96facb73cc55c9cd58c2b4723.hip | // !!! This is a file automatically generated by hipify!!!
/* -*- Mode: C ; indent-tabs-mode: nil ; c-file-style: "stroustrup" -*-
CS 6620 - Compilers
Stencil App Language Project
Authors: Greg Faust, Sal Valente
File: Model.cu Contains the analytical model for predicting stencil app latencies based on input sizes and trapezoid height.
TODO For now, this is more or less a direct translation of the MatLab code.
All of that code assumes all the data and blocks are the same size in all dimensions.
Once we get this working, we might consider relaxing that assumption.
Also, MatLab is not typed.
Apparently, the current latencies are in terms of GPU cycles.
To avoid step functions, I have used doubles throughout.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <sys/time.h>
#include "Model.h"
// This turns on the debugging capabilities of things defined in cutil.h
#define _DEBUG
// First, give some simple math helper functions.
double iexp(double base, int power)
{
double retval = 1;
for (int i=0; i<power; i++) retval *= base;
return retval;
}
// This will return the largest int that goes into composite, possibly with leftover.
// That is, it is floor of the real log.
int ilog(int root, int composite)
{
double retd = pow(composite, 1.0/root);
// Give it a little wiggle room for floating point errors.
return (int)(retd + .02);
}
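// For example, ilog(3, 512) == 8 (since 8*8*8 == 512), while ilog(2, 1000) == 31
// because 31*31 = 961 <= 1000 < 1024 = 32*32.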
// Some CUDA helper functions.
inline dim3 filldim3(dim3 * d3, int x = 1, int y = 1, int z = 1)
{
d3->x = x;
d3->y = y;
d3->z = z;
return *d3;
}
inline dim3 copydim3(dim3 in, dim3 out)
{
out.x = in.x;
out.y = in.y;
out.z = in.z;
return out;
}
// A debug helper.
// We can probably get rid of this before code freeze.
int printCudaDevices()
{
int curDev;
CUDA_SAFE_CALL(hipGetDevice(&curDev));
fprintf(stderr, "Current cuda device is: %d.\n", curDev);
int devCount;
CUDA_SAFE_CALL(hipGetDeviceCount(&devCount));
fprintf(stderr, "There are %d cuda devices on this machine.\n", devCount);
int i;
CudaDeviceProps_t * devProps = (CudaDeviceProps_t *)malloc(sizeof(CudaDeviceProps_t));
for (i=0; i<devCount; i++)
{
CUDA_SAFE_CALL(hipGetDeviceProperties(devProps, i));
printCudaDeviceProps(devProps);
}
free(devProps);
return curDev;
}
// A debug helper.
// We can probably get rid of this before code freeze.
void printCudaDeviceProps (CudaDeviceProps_t * devProps)
{
    fprintf(stdout, "CUDA device \"%s\" properties.\n", devProps->name);
fprintf(stdout, "Release %d.%d.\n", devProps->major, devProps->minor);
// fprintf(stdout, "Compute Mode=%d.\n", devProps->computeMode);
fprintf(stdout, "Global Memory Size=%zd.\n", devProps->totalGlobalMem);
fprintf(stdout, "Shared Memory Per Block=%zd.\n", devProps->sharedMemPerBlock);
fprintf(stdout, "Registers Per Block=%d.\n", devProps->regsPerBlock);
fprintf(stdout, "Clock Rate (KiloHertz)=%d.\n", devProps->clockRate);
fprintf(stdout, "Warp Size=%d.\n", devProps->warpSize);
fprintf(stdout, "Maximum Threads per Block=%d.\n", devProps->maxThreadsPerBlock);
fprintf(stdout, "Maximum Block Dimensions=[%d, %d, %d].\n", devProps->maxThreadsDim[0], devProps->maxThreadsDim[1], devProps->maxThreadsDim[2]);
fprintf(stdout, "Maximum Grid Dimensions=[%d, %d, %d].\n", devProps->maxGridSize[0], devProps->maxGridSize[1], devProps->maxGridSize[2]);
}
// We need these as part of the runtime system.
CudaDeviceProps_t * getCudaDeviceProps (int devNum)
{
CudaDeviceProps_t * devProps = (CudaDeviceProps_t *)malloc(sizeof(CudaDeviceProps_t));
CUDA_SAFE_CALL(hipGetDeviceProperties(devProps, devNum));
return devProps;
}
CudaDeviceProps_t * getCurrentCudaDeviceProps ()
{
int curDev;
CUDA_SAFE_CALL(hipGetDevice(&curDev));
return getCudaDeviceProps(curDev);
}
// We need this as part of the runtime system.
CudaFunctionAtts_t * getCudaFunctionAtts (char * functionName)
{
CudaFunctionAtts_t * FAs = (CudaFunctionAtts_t * )malloc(sizeof(CudaFunctionAtts_t));
CUDA_SAFE_CALL(hipFuncGetAttributes(FAs, "groupClumps")); // "_Z11groupClumpssiiPtPjS0_p8fragment"));
// fprintf(stderr, "Max Threads per block in groupClumps=%d and register count=%d\n", FAs->maxThreadsPerBlock, FAs->numRegs);
return FAs;
}
// Make a SACuda Latency structure, and give default values to all fields.
SACudaLats_t * makeSACudaLats()
{
SACudaLats_t * SACLs = (SACudaLats_t *)(malloc(sizeof(SACudaLats_t)));
SACLs->totalLat = 0;
SACLs->avgLoadLat = 0;
SACLs->avgStoreLat = 0;
SACLs->avgCompLat = 0;
SACLs->avgSetupCompLat = 0;
SACLs->avgEmbedMemLat = 0;
SACLs->avgGlobalSyncLat = 0;
return SACLs;
};
// Dispose of a SACuda Latency structure.
void disposeSACudaLats(SACudaLats_t * SACLs)
{
free(SACLs);
SACLs=NULL;
}
// Print out SACL values to aid in debugging.
void printSACLs(SACudaLats_t * SACLs, int pyramidH)
{
fprintf(stderr, "SACL avgLoadLat=%f, total=%f\n", SACLs->avgLoadLat, SACLs->avgLoadLat * pyramidH);
fprintf(stderr, "SACL avgStoreLat=%f, total=%f\n", SACLs->avgStoreLat, SACLs->avgStoreLat * pyramidH);
fprintf(stderr, "SACL avgEmbedMemLat=%f, total=%f\n", SACLs->avgEmbedMemLat, SACLs->avgEmbedMemLat * pyramidH);
fprintf(stderr, "SACL avgCompLat=%f, total=%f\n", SACLs->avgCompLat, SACLs->avgCompLat * pyramidH);
fprintf(stderr, "SACL avgSetupCompLat=%f, total=%f\n", SACLs->avgSetupCompLat, SACLs->avgSetupCompLat * pyramidH);
fprintf(stderr, "SACL avgGlobalSynchLat=%f, total=%f\n", SACLs->avgGlobalSyncLat, SACLs->avgGlobalSyncLat * pyramidH);
fprintf(stderr, "SACL TotalLat=%f, total=%f\n", SACLs->totalLat, SACLs->totalLat * pyramidH);
}
static SAProps_t * SAPs;
// Make a SAProps structure, and give default values to some fields.
dim3 initSAProps(int dims, dim3 input_size, dim3 stencil_size, int iterations, int dataESize, char * kernelName)
{
SAPs = (SAProps_t *)(malloc(sizeof(SAProps_t)));
// These all come directly from the input args to the app or in the stencil language.
SAPs->numDims = dims;
SAPs->dataDims = input_size;
SAPs->haloDims = stencil_size;
SAPs->iterations = iterations;
SAPs->dataElemSize = dataESize;
// TODO In order to get these right, we will need a new stencil app declaration of data reads in the CellValue calculation.
// This used to be 1 for all 4 samples except hotSpot (where it was 2).
// Now it is 1 for ALL apps.
SAPs->setupGlobalLoadsPerCell = 1;
// This used to be 0 for all 4 samples except pathfinder (where it was 1).
// But now it will be 1 for any app that accesses global read only data.
// The model does not seem very sensitive to this value.
SAPs->loopGlobalLoadsPerCell = 1;
    // This is 1 for all 4 samples except cell (where it is 2 because of the dimensionality).
// I see no way to derive this value from the app at all.
SAPs->bankConflict = (dims < 3) ? 1 : 2;
// Now we will calculate the block dimensions.
// Since the app will ALWAYS run faster with larger block size,
// We will make the blocks as big as they can be on the device.
// TODO We could perhaps also look at the shared memory and make sure we fit.
// But for now, this is not a limiting factor for any of the sample apps.
CudaFunctionAtts_t * CFAs = (CudaFunctionAtts_t *)malloc(sizeof(CudaFunctionAtts_t));
SAPs->CFAs = CFAs;
CUDA_SAFE_CALL(hipFuncGetAttributes(CFAs, kernelName));
// fprintf(stderr, "Max Threads per block in %s=%d, register count=%d, sharedMemUsage=%d.\n", kernelName, CFAs->maxThreadsPerBlock, CFAs->numRegs, CFAs->sharedSizeBytes);
int blockLen = ilog(dims, CFAs->maxThreadsPerBlock);
// fprintf(stderr, "Block Length=%d.\n", blockLen);
// The block size can't be larger than the data size!
SAPs->blockDims.x = MIN(blockLen, SAPs->dataDims.x);
SAPs->blockDims.y = SAPs->blockDims.z = 1;
if (dims > 1) SAPs->blockDims.y = MIN(blockLen, SAPs->dataDims.y);
if (dims > 2) SAPs->blockDims.z = MIN(blockLen, SAPs->dataDims.z);
// Fill in the cuda device properties.
SAPs->CDPs = getCurrentCudaDeviceProps ();
return SAPs->blockDims;
}
void disposeSAProps(SAProps_t * SAPs)
{
free(SAPs);
SAPs=NULL;
}
// TODO It would be better if this were not a macro.
// However, it calls kernel functions.
// To make it a function, we would have to figure out how to get
// function ptrs for kernels whose names are generated by the stencil tool.
// NOTE: I tried calling the kernel 3 times and averaging, but it did not materially improve things.
static struct timeval starttime, endtime;
static unsigned int usec;
static hipError_t cudaerr;
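// Helper macro: runs funcall three times, synchronizing the device after each call,
// and stores the average elapsed wall-clock time (in microseconds) into var.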
#define timeInMicroSeconds(var, funcall) \
({ \
gettimeofday(&starttime, NULL); \
funcall; \
CUDA_SAFE_THREAD_SYNC(); \
funcall; \
CUDA_SAFE_THREAD_SYNC(); \
funcall; \
CUDA_SAFE_THREAD_SYNC(); \
gettimeofday(&endtime, NULL); \
usec = ((endtime.tv_sec - starttime.tv_sec) * 1000000 + \
(endtime.tv_usec - starttime.tv_usec)); \
var = usec/3; \
})
// An empty kernel used to measure kernel call overhead.
// Note that how long this takes to run depends a lot on the size of block and grid.
__global__ void dummyKernel ()
{
}
//////////////////////////////////////////////////////////////////////////////////
// Start of the translation of Jiayuan Meng's MatLab code.
//////////////////////////////////////////////////////////////////////////////////
// Some helper functions for the main routine.
static inline int div_ceil(int num, int denom)
{
return (int)((num + denom - 1) / denom);
}
// These are called by the model.
// Most could probably be inlined.
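// workingSet(edge, dim) is simply edge^dim: the number of cells covered by a cubic tile of that edge length.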
double workingSet(int edge, int dimension)
{
return (double)(iexp(edge,dimension));
}
double memLat(double numElements, int coalesceWidth, double memQueueLat, double uncontendedLat)
{
double concurrentRequests = ((double)numElements)/coalesceWidth;
return (concurrentRequests*memQueueLat) + uncontendedLat;
}
double pyramidMemLat(int edge, int numBlocks, int halo, int dimension, int pyramidHeight, int coalesceWidth, double memQueueLat, double uncontendedLat)
{
double set = workingSet(edge-halo, dimension)*numBlocks;
return pyramidHeight*memLat(set, coalesceWidth, memQueueLat, uncontendedLat);
}
double blockCompLat(double numElements, double IPC, double instPerElementPerWarp)
{
return ((double)instPerElementPerWarp)/IPC*numElements;
}
double pyramidBlockCompLat(int edge, int halo, int dimension, int pyramidHeight, double IPC, double instPerElementPerWarp)
{
double set = workingSet(edge-halo, dimension);
return pyramidHeight*blockCompLat(set, IPC, instPerElementPerWarp);
}
// This is the main workhorse routine for the model.
// It takes in properties of the Stencil Application, the Cuda device the app will run on, and the block and trapezoid sizes.
// From these it calculates all the predicted latencies.
// This should be called (repeatedly) by some routine that does the optimization of the block side and trapezoid height.
double calcSACudaLats(SAProps_t * SAProps, int blockSize, int pyramidHeight)
{
CudaDeviceProps_t * CDPs = SAPs->CDPs;
SACudaLats_t * SACLs = SAPs->SACLs;
// Jiayuan assumed data set was the same size in all dimensions.
// TODO make it so that the different dimensions can be different sizes.
double dataEdge = SAProps->dataDims.x;
double halo = SAProps->haloDims.x;
int dims = SAProps->numDims;
// double numBlocks = iexp(div_ceil(dataEdge, (blockSize - (pyramidHeight*halo))), dims);
double numBlocks = iexp(dataEdge/(blockSize-(pyramidHeight*halo)),dims);
// This seems to be two magic constants 0.5 and bankConflict.
double IPC = 0.5/SAProps->bankConflict;
// Jiayuan's comments.
// This is for the GTX 280.
// double glbSync = 9033.0 * CPUclock/GPUclock;
// This is for the 9800
// glbSync = 95847*CPUclock/GPUclock;
// Now we get it from the device!
double glbSync = SAProps->globalSynchLat;
// Can we get this from the device information?
double coalesceWidth = 16;
// Another magic constant.
double uncontendedLat = 300;
// Why can't this be calculated?
// Something like numBlocks/SAPs->CDPs->multiProcessorCount??
// Instead, it is the number of ACTIVE blocks per MP.
// Capped at 8 for all current Cuda Devices per Cuda programming guide.
double numBlocksPerMP = 8;
// Another magic constant?
double factor = iexp(5,(dims-1));
    // Straight from MatLab.
double requestSize = 4;
double bandwidth_BperCycle = 141.7 / 1.3;
double memQueueLat = requestSize*coalesceWidth/bandwidth_BperCycle*factor;
double numMPs = CDPs->multiProcessorCount;
double numConcurrentBlocks = numBlocksPerMP*numMPs;
// GGF If the store latency relies on the concurrency, why not the load?
// But making this change breaks the model.
// GGF double loadLat = ((double)numBlocks)/numConcurrentBlocks*SAProps->setupGlobalLoadsPerCell *
// memLat(workingSet(blockSize, dims) * numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
// Below is the original Jiayuan calculation.
double loadLat = SAProps->setupGlobalLoadsPerCell * memLat(workingSet(blockSize, dims)*numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
// GGF Why is the calculation of the store latency so different from the load latency??
// GGF double storeLat = memLat(workingSet(blockSize - (pyramidHeight * halo), dims) * numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
// Below is the original Jiayuan calculation.
double storeLat = ((double)numBlocks)/numConcurrentBlocks *
memLat(workingSet(blockSize - (pyramidHeight * halo), dims) * numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
double embeddedMemLat = ((double)SAProps->loopGlobalLoadsPerCell)*numBlocks / numConcurrentBlocks *
pyramidMemLat(blockSize, numConcurrentBlocks, halo, dims, pyramidHeight, coalesceWidth, memQueueLat, uncontendedLat);
// These values are now sampled dynamically.
double setupInstrPerWarp = SAPs->setupInstrPerWarp;
double totalInstrPerWarp = SAPs->totalInstrPerWarp;
// All the below directly from MatLab, with Jiayuan's comment at line end.
double computeLat = pyramidBlockCompLat(blockSize, halo, dims, pyramidHeight, IPC, totalInstrPerWarp - setupInstrPerWarp);
double setupCompLat = blockCompLat(workingSet(blockSize, dims), IPC, setupInstrPerWarp); // - loadLat;
SACLs->avgLoadLat = loadLat/pyramidHeight; // going down then suddenly high [Category A, major]
SACLs->avgStoreLat = storeLat/pyramidHeight; // going down then suddenly high [A, minor]
SACLs->avgCompLat = (computeLat*(numBlocks/numMPs))/pyramidHeight; // going higher always [Category B, major]
SACLs->avgSetupCompLat = (setupCompLat*(numBlocks/numMPs))/pyramidHeight; // going down then suddenly high [A, negligible]
SACLs->avgEmbedMemLat = embeddedMemLat/pyramidHeight; // going higher always [B, minor]
SACLs->avgGlobalSyncLat = glbSync/pyramidHeight;
    // GGF Why are the computeLat and setupCompLat multiplied by numBlocks/numMPs TWICE!?!
    // GGF However, changing to the line below makes the model calculate pyramid heights that are too high.
// GGF SACLs->totalLat = (glbSync + (computeLat + setupCompLat) + (loadLat + storeLat + embeddedMemLat)) / pyramidHeight;
// Below is original Jiayuan calculation.
SACLs->totalLat = (glbSync + (numBlocks / numMPs) * (computeLat + setupCompLat) + (loadLat + storeLat + embeddedMemLat)) / pyramidHeight;
return SACLs->totalLat;
}
// Put all the values in an array so we can find the second best as well as the best.
// This is a real hack to avoid returning a list to the template, or having to sort.
static double minLats[1024];
static int validLats;
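// Scan the saved per-height latencies and return the pyramid height with the lowest
// modeled latency other than 'first' (i.e. the runner-up height).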
int getSecond(int first)
{
int retval = 1;
double minLat = 1e40;
for (int i = 1; i<=validLats; i++)
{
if (i == first) continue;
if (minLats[i] < minLat)
{
retval = i;
minLat = minLats[i];
}
}
return retval;
}
int calcMinLatencyInternal(SAProps_t * SAPs)
{
// Make a structure to catch the various latencies.
SAPs->SACLs = makeSACudaLats();
// The blocksize has already been determined.
int blockSize = SAPs->blockDims.x;
// initialize min to hugely big number.
double minLat = 1e40;
// Try all the possible pyramid heights.
// In theory there is a descent that can be followed to a valley.
    // But sometimes there is noise near the bottom of the valley.
    // And a descent will get a sub-optimal value.
    // Therefore, it currently calculates all heights and takes the minimum.
    // It is a trade-off between calculation time and optimal running time.
validLats = blockSize/2 - SAPs->haloDims.x;
// fprintf(stderr, "Valid PH=%d.\n", validLats);
for (int i=1; i<=validLats; i++)
{
double pyrUp = calcSACudaLats(SAPs, blockSize, i);
#ifdef STATISTICS
fprintf(stderr, "Model Pyramid Size=%d, Model Latency=%f\n", i, pyrUp);
#endif
// Store results to support hack to find second best.
minLats[i] = pyrUp;
// Model should not generate negative values.
// So, this is a safety check.
if (pyrUp < 0)
{
break;
}
// Remember minimum latency, and associated pyramid height.
if (pyrUp < minLat)
{
minLat = pyrUp;
SAPs->pyramidHeight = i;
}
// Reinstate below line to just do the descent to the valley floor.
// else break;
}
// fprintf(stderr, "BlockSize=%d, Pyramid=%d, Latency==%f\n", blockSize, SAPs->pyramidHeight, minLat);
return SAPs->pyramidHeight;
}
int calcPyramidHeight(dim3 grid_dims, unsigned int oneIterTime, unsigned int twoIterTime)
{
// Now we have enough information for the training period.
    // First call a kernel to make sure the device is fully initialized.
hipLaunchKernelGGL(( dummyKernel), dim3(grid_dims), dim3(SAPs->blockDims), 0, 0, );
long int ekTime = 0;
timeInMicroSeconds(ekTime,hipLaunchKernelGGL(( (dummyKernel), dim3(grid_dims), dim3(SAPs->blockDims), 0, 0, )));
// fprintf(stderr, "Empty Kernel time=%u.\n", ekTime);
// Convert micro-seconds into GPU cycles.
if (SAPs->CDPs->major >= 2) SAPs->globalSynchLat = ((double)ekTime)*SAPs->CDPs->clockRate/1000;
else SAPs->globalSynchLat = 3350;
    // Try setting it to Jiayuan's magic constant.
// SAPs->globalSynchLat = 3350;
// Try adding a bit to the synch time.
ekTime *= 2.0;
// fprintf(stderr, "One iter time=%u, Two iter time=%u.\n", oneIterTime, twoIterTime);
// Now let's calculate the various latencies.
long int oneIter = oneIterTime;
long int twoIter = twoIterTime;
// Remove the kernel call overhead.
oneIter -= ekTime;
twoIter -= ekTime;
// This is the rational calculation of the times for one iteration
// and for the setup time.
double fullTime = oneIter;
double setupTime = fullTime - (twoIter - oneIter);
// However, rational calculation does not work for small data sizes.
// THIS IS A TOTAL HACK!
// For some reason, we can't get good timings when things are small.
// if (setupTime <= ekTime) setupTime = 2 * ekTime;
// if (fullTime <= setupTime) fullTime = 2 * setupTime;
double magic = 1.25;
// if (SAPs->CDPs->major >= 2) magic = 3.0;
// else
if (SAPs->numDims == 2) magic = 3.0;
if (setupTime <= 0 || fullTime < magic * ekTime)
{
setupTime = MAX(setupTime, 1. * ekTime);
fullTime = MAX(fullTime, magic * setupTime);
fprintf(stderr, "changed setup and full.\n");
}
// Let's use the model to calculate the best height.
dim3 * dDims = &(SAPs->dataDims);
dim3 * bDims = &(SAPs->blockDims);
int WarpsPerBlock = div_ceil((bDims->x * bDims->y * bDims->z), SAPs->CDPs->warpSize);
int numOfWarps;
int numOfBlocks;
// setup was run with pyramid height of 1.
// Use div_ceil to match the grid sizes as calculated before the actual run was made.
numOfBlocks = iexp(div_ceil(dDims->x, bDims->x - 2*SAPs->haloDims.x), SAPs->numDims);
numOfWarps = numOfBlocks * WarpsPerBlock;
SAPs->setupInstrPerWarp = (((double)setupTime)*SAPs->CDPs->clockRate/1000)/numOfWarps;
// total was run with pyramid height of 2.
// But this makes the model worse for small data sizes.
// numOfBlocks = div_ceil(dDims->x, bDims->x - 4*SAPs->haloDims.x) * div_ceil(dDims->y, bDims->y - 4*SAPs->haloDims.y);
numOfWarps = numOfBlocks * WarpsPerBlock;
SAPs->totalInstrPerWarp = (((double)fullTime)*SAPs->CDPs->clockRate/1000)/numOfWarps;
// fprintf(stderr, "Total instructions per warp=%f.\n", SAPs->totalInstrPerWarp);
// fprintf(stderr, "Setup instructions per warp=%f.\n", SAPs->setupInstrPerWarp);
int ph = calcMinLatencyInternal(SAPs);
return ph;
}
| bcd3a5c0cc6d92f96facb73cc55c9cd58c2b4723.cu | /* -*- Mode: C ; indent-tabs-mode: nil ; c-file-style: "stroustrup" -*-
CS 6620 - Compilers
Stencil App Language Project
Authors: Greg Faust, Sal Valente
File: Model.cu Contains the analytical model for predicting stencil app latencies based on input sizes and trapezoid height.
TODO For now, this is more or less a direct translation of the MatLab code.
All of that code assumes all the data and blocks are the same size in all dimensions.
Once we get this working, we might consider relaxing that assumption.
Also, MatLab is not typed.
Apparently, the current latencies are in terms of GPU cycles.
To avoid step functions, I have used doubles throughout.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include <sys/time.h>
#include "Model.h"
// This turns on the debugging capabilities of things defined in cutil.h
#define _DEBUG
// First, give some simple math helper functions.
double iexp(double base, int power)
{
double retval = 1;
for (int i=0; i<power; i++) retval *= base;
return retval;
}
// This will return the largest int that goes into composite, possibly with leftover.
// That is, it is floor of the real log.
int ilog(int root, int composite)
{
double retd = pow(composite, 1.0/root);
// Give it a little wiggle room for floating point errors.
return (int)(retd + .02);
}
// Some CUDA helper furnctions.
inline dim3 filldim3(dim3 * d3, int x = 1, int y = 1, int z = 1)
{
d3->x = x;
d3->y = y;
d3->z = z;
return *d3;
}
inline dim3 copydim3(dim3 in, dim3 out)
{
out.x = in.x;
out.y = in.y;
out.z = in.z;
return out;
}
// A debug helper.
// We can probably get rid of this before code freeze.
int printCudaDevices()
{
int curDev;
CUDA_SAFE_CALL(cudaGetDevice(&curDev));
fprintf(stderr, "Current cuda device is: %d.\n", curDev);
int devCount;
CUDA_SAFE_CALL(cudaGetDeviceCount(&devCount));
fprintf(stderr, "There are %d cuda devices on this machine.\n", devCount);
int i;
CudaDeviceProps_t * devProps = (CudaDeviceProps_t *)malloc(sizeof(CudaDeviceProps_t));
for (i=0; i<devCount; i++)
{
CUDA_SAFE_CALL(cudaGetDeviceProperties(devProps, i));
printCudaDeviceProps(devProps);
}
free(devProps);
return curDev;
}
// A debug helper.
// We can probably get rid of this before code freeze.
void printCudaDeviceProps (CudaDeviceProps_t * devProps)
{
fprintf(stdout, "CUDA device \"%s\" properites.\n", devProps->name);
fprintf(stdout, "Release %d.%d.\n", devProps->major, devProps->minor);
// fprintf(stdout, "Compute Mode=%d.\n", devProps->computeMode);
fprintf(stdout, "Global Memory Size=%zd.\n", devProps->totalGlobalMem);
fprintf(stdout, "Shared Memory Per Block=%zd.\n", devProps->sharedMemPerBlock);
fprintf(stdout, "Registers Per Block=%d.\n", devProps->regsPerBlock);
fprintf(stdout, "Clock Rate (KiloHertz)=%d.\n", devProps->clockRate);
fprintf(stdout, "Warp Size=%d.\n", devProps->warpSize);
fprintf(stdout, "Maximum Threads per Block=%d.\n", devProps->maxThreadsPerBlock);
fprintf(stdout, "Maximum Block Dimensions=[%d, %d, %d].\n", devProps->maxThreadsDim[0], devProps->maxThreadsDim[1], devProps->maxThreadsDim[2]);
fprintf(stdout, "Maximum Grid Dimensions=[%d, %d, %d].\n", devProps->maxGridSize[0], devProps->maxGridSize[1], devProps->maxGridSize[2]);
}
// We need these as part of the runtime system.
CudaDeviceProps_t * getCudaDeviceProps (int devNum)
{
CudaDeviceProps_t * devProps = (CudaDeviceProps_t *)malloc(sizeof(CudaDeviceProps_t));
CUDA_SAFE_CALL(cudaGetDeviceProperties(devProps, devNum));
return devProps;
}
CudaDeviceProps_t * getCurrentCudaDeviceProps ()
{
int curDev;
CUDA_SAFE_CALL(cudaGetDevice(&curDev));
return getCudaDeviceProps(curDev);
}
// We need this as part of the runtime system.
CudaFunctionAtts_t * getCudaFunctionAtts (char * functionName)
{
CudaFunctionAtts_t * FAs = (CudaFunctionAtts_t * )malloc(sizeof(CudaFunctionAtts_t));
CUDA_SAFE_CALL(cudaFuncGetAttributes(FAs, "groupClumps")); // "_Z11groupClumpssiiPtPjS0_p8fragment"));
// fprintf(stderr, "Max Threads per block in groupClumps=%d and register count=%d\n", FAs->maxThreadsPerBlock, FAs->numRegs);
return FAs;
}
// Make a SACuda Latency structure, and give default values to all fields.
SACudaLats_t * makeSACudaLats()
{
SACudaLats_t * SACLs = (SACudaLats_t *)(malloc(sizeof(SACudaLats_t)));
SACLs->totalLat = 0;
SACLs->avgLoadLat = 0;
SACLs->avgStoreLat = 0;
SACLs->avgCompLat = 0;
SACLs->avgSetupCompLat = 0;
SACLs->avgEmbedMemLat = 0;
SACLs->avgGlobalSyncLat = 0;
return SACLs;
};
// Dispose of a SACuda Latency structure.
void disposeSACudaLats(SACudaLats_t * SACLs)
{
free(SACLs);
SACLs=NULL;
}
// Print out SACL values to aid in debugging.
void printSACLs(SACudaLats_t * SACLs, int pyramidH)
{
fprintf(stderr, "SACL avgLoadLat=%f, total=%f\n", SACLs->avgLoadLat, SACLs->avgLoadLat * pyramidH);
fprintf(stderr, "SACL avgStoreLat=%f, total=%f\n", SACLs->avgStoreLat, SACLs->avgStoreLat * pyramidH);
fprintf(stderr, "SACL avgEmbedMemLat=%f, total=%f\n", SACLs->avgEmbedMemLat, SACLs->avgEmbedMemLat * pyramidH);
fprintf(stderr, "SACL avgCompLat=%f, total=%f\n", SACLs->avgCompLat, SACLs->avgCompLat * pyramidH);
fprintf(stderr, "SACL avgSetupCompLat=%f, total=%f\n", SACLs->avgSetupCompLat, SACLs->avgSetupCompLat * pyramidH);
fprintf(stderr, "SACL avgGlobalSynchLat=%f, total=%f\n", SACLs->avgGlobalSyncLat, SACLs->avgGlobalSyncLat * pyramidH);
fprintf(stderr, "SACL TotalLat=%f, total=%f\n", SACLs->totalLat, SACLs->totalLat * pyramidH);
}
static SAProps_t * SAPs;
// Make a SAProps structure, and give default values to some fields.
dim3 initSAProps(int dims, dim3 input_size, dim3 stencil_size, int iterations, int dataESize, char * kernelName)
{
SAPs = (SAProps_t *)(malloc(sizeof(SAProps_t)));
// These all come directly from the input args to the app or in the stencil language.
SAPs->numDims = dims;
SAPs->dataDims = input_size;
SAPs->haloDims = stencil_size;
SAPs->iterations = iterations;
SAPs->dataElemSize = dataESize;
// TODO In order to get these right, we will need a new stencil app declaration of data reads in the CellValue calculation.
// This used to be 1 for all 4 samples except hotSpot (where it was 2).
// Now it is 1 for ALL apps.
SAPs->setupGlobalLoadsPerCell = 1;
// This used to be 0 for all 4 samples except pathfinder (where it was 1).
// But now it will be 1 for any app that accesses global read only data.
// The model does not seem very sensitive to this value.
SAPs->loopGlobalLoadsPerCell = 1;
    // This is 1 for all 4 samples except cell (where it is 2 because of the dimensionality).
// I see no way to derive this value from the app at all.
SAPs->bankConflict = (dims < 3) ? 1 : 2;
// Now we will calculate the block dimensions.
// Since the app will ALWAYS run faster with larger block size,
// We will make the blocks as big as they can be on the device.
// TODO We could perhaps also look at the shared memory and make sure we fit.
// But for now, this is not a limiting factor for any of the sample apps.
CudaFunctionAtts_t * CFAs = (CudaFunctionAtts_t *)malloc(sizeof(CudaFunctionAtts_t));
SAPs->CFAs = CFAs;
CUDA_SAFE_CALL(cudaFuncGetAttributes(CFAs, kernelName));
// fprintf(stderr, "Max Threads per block in %s=%d, register count=%d, sharedMemUsage=%d.\n", kernelName, CFAs->maxThreadsPerBlock, CFAs->numRegs, CFAs->sharedSizeBytes);
int blockLen = ilog(dims, CFAs->maxThreadsPerBlock);
// fprintf(stderr, "Block Length=%d.\n", blockLen);
// The block size can't be larger than the data size!
SAPs->blockDims.x = MIN(blockLen, SAPs->dataDims.x);
SAPs->blockDims.y = SAPs->blockDims.z = 1;
if (dims > 1) SAPs->blockDims.y = MIN(blockLen, SAPs->dataDims.y);
if (dims > 2) SAPs->blockDims.z = MIN(blockLen, SAPs->dataDims.z);
// Fill in the cuda device properties.
SAPs->CDPs = getCurrentCudaDeviceProps ();
return SAPs->blockDims;
}
void disposeSAProps(SAProps_t * SAPs)
{
free(SAPs);
SAPs=NULL;
}
// TODO It would be better if this were not a macro.
// However, it calls kernel functions.
// To make it a function, we would have to figure out how to get
// function ptrs for kernels whose names are generated by the stencil tool.
// NOTE: I tried calling the kernel 3 times and averaging, but it did not materially improve things.
static struct timeval starttime, endtime;
static unsigned int usec;
static cudaError_t cudaerr;
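// Helper macro: runs funcall three times, synchronizing the device after each call,
// and stores the average elapsed wall-clock time (in microseconds) into var.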
#define timeInMicroSeconds(var, funcall) \
({ \
gettimeofday(&starttime, NULL); \
funcall; \
CUDA_SAFE_THREAD_SYNC(); \
funcall; \
CUDA_SAFE_THREAD_SYNC(); \
funcall; \
CUDA_SAFE_THREAD_SYNC(); \
gettimeofday(&endtime, NULL); \
usec = ((endtime.tv_sec - starttime.tv_sec) * 1000000 + \
(endtime.tv_usec - starttime.tv_usec)); \
var = usec/3; \
})
// An empty kernel used to measure kernel call overhead.
// Note that how long this takes to run depends a lot on the size of block and grid.
__global__ void dummyKernel ()
{
}
//////////////////////////////////////////////////////////////////////////////////
// Start of the translation of Jiayuan Meng's MatLab code.
//////////////////////////////////////////////////////////////////////////////////
// Some helper functions for the main routine.
static inline int div_ceil(int num, int denom)
{
return (int)((num + denom - 1) / denom);
}
// These are called by the model.
// Most could probably be inlined.
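// workingSet(edge, dim) is simply edge^dim: the number of cells covered by a cubic tile of that edge length.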
double workingSet(int edge, int dimension)
{
return (double)(iexp(edge,dimension));
}
double memLat(double numElements, int coalesceWidth, double memQueueLat, double uncontendedLat)
{
double concurrentRequests = ((double)numElements)/coalesceWidth;
return (concurrentRequests*memQueueLat) + uncontendedLat;
}
double pyramidMemLat(int edge, int numBlocks, int halo, int dimension, int pyramidHeight, int coalesceWidth, double memQueueLat, double uncontendedLat)
{
double set = workingSet(edge-halo, dimension)*numBlocks;
return pyramidHeight*memLat(set, coalesceWidth, memQueueLat, uncontendedLat);
}
double blockCompLat(double numElements, double IPC, double instPerElementPerWarp)
{
return ((double)instPerElementPerWarp)/IPC*numElements;
}
double pyramidBlockCompLat(int edge, int halo, int dimension, int pyramidHeight, double IPC, double instPerElementPerWarp)
{
double set = workingSet(edge-halo, dimension);
return pyramidHeight*blockCompLat(set, IPC, instPerElementPerWarp);
}
// This is the main workhorse routine for the model.
// It takes in properties of the Stencil Application, the Cuda device the app will run on, and the block and trapezoid sizes.
// From these it calculates all the predicted latencies.
// This should be called (repeatedly) by some routine that does the optimization of the block side and trapezoid height.
double calcSACudaLats(SAProps_t * SAProps, int blockSize, int pyramidHeight)
{
CudaDeviceProps_t * CDPs = SAPs->CDPs;
SACudaLats_t * SACLs = SAPs->SACLs;
// Jiayuan assumed data set was the same size in all dimensions.
// TODO make it so that the different dimensions can be different sizes.
double dataEdge = SAProps->dataDims.x;
double halo = SAProps->haloDims.x;
int dims = SAProps->numDims;
// double numBlocks = iexp(div_ceil(dataEdge, (blockSize - (pyramidHeight*halo))), dims);
double numBlocks = iexp(dataEdge/(blockSize-(pyramidHeight*halo)),dims);
// This seems to be two magic constants 0.5 and bankConflict.
double IPC = 0.5/SAProps->bankConflict;
// Jiayuan's comments.
// This is for the GTX 280.
// double glbSync = 9033.0 * CPUclock/GPUclock;
// This is for the 9800
// glbSync = 95847*CPUclock/GPUclock;
// Now we get it from the device!
double glbSync = SAProps->globalSynchLat;
// Can we get this from the device information?
double coalesceWidth = 16;
// Another magic constant.
double uncontendedLat = 300;
// Why can't this be calculated?
// Something like numBlocks/SAPs->CDPs->multiProcessorCount??
// Instead, it is the number of ACTIVE blocks per MP.
// Capped at 8 for all current Cuda Devices per Cuda programming guide.
double numBlocksPerMP = 8;
// Another magic constant?
double factor = iexp(5,(dims-1));
    // Straight from MatLab.
double requestSize = 4;
double bandwidth_BperCycle = 141.7 / 1.3;
double memQueueLat = requestSize*coalesceWidth/bandwidth_BperCycle*factor;
double numMPs = CDPs->multiProcessorCount;
double numConcurrentBlocks = numBlocksPerMP*numMPs;
// GGF If the store latency relies on the concurrency, why not the load?
// But making this change breaks the model.
// GGF double loadLat = ((double)numBlocks)/numConcurrentBlocks*SAProps->setupGlobalLoadsPerCell *
// memLat(workingSet(blockSize, dims) * numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
// Below is the original Jiayuan calculation.
double loadLat = SAProps->setupGlobalLoadsPerCell * memLat(workingSet(blockSize, dims)*numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
// GGF Why is the calculation of the store latency so different from the load latency??
// GGF double storeLat = memLat(workingSet(blockSize - (pyramidHeight * halo), dims) * numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
// Below is the original Jiayuan calculation.
double storeLat = ((double)numBlocks)/numConcurrentBlocks *
memLat(workingSet(blockSize - (pyramidHeight * halo), dims) * numConcurrentBlocks, coalesceWidth, memQueueLat, uncontendedLat);
double embeddedMemLat = ((double)SAProps->loopGlobalLoadsPerCell)*numBlocks / numConcurrentBlocks *
pyramidMemLat(blockSize, numConcurrentBlocks, halo, dims, pyramidHeight, coalesceWidth, memQueueLat, uncontendedLat);
// These values are now sampled dynamically.
double setupInstrPerWarp = SAPs->setupInstrPerWarp;
double totalInstrPerWarp = SAPs->totalInstrPerWarp;
// All the below directly from MatLab, with Jiayuan's comment at line end.
double computeLat = pyramidBlockCompLat(blockSize, halo, dims, pyramidHeight, IPC, totalInstrPerWarp - setupInstrPerWarp);
double setupCompLat = blockCompLat(workingSet(blockSize, dims), IPC, setupInstrPerWarp); // - loadLat;
SACLs->avgLoadLat = loadLat/pyramidHeight; // going down then suddenly high [Category A, major]
SACLs->avgStoreLat = storeLat/pyramidHeight; // going down then suddenly high [A, minor]
SACLs->avgCompLat = (computeLat*(numBlocks/numMPs))/pyramidHeight; // going higher always [Category B, major]
SACLs->avgSetupCompLat = (setupCompLat*(numBlocks/numMPs))/pyramidHeight; // going down then suddenly high [A, negligible]
SACLs->avgEmbedMemLat = embeddedMemLat/pyramidHeight; // going higher always [B, minor]
SACLs->avgGlobalSyncLat = glbSync/pyramidHeight;
    // GGF Why are the computeLat and setupCompLat multiplied by numBlocks/numMPs TWICE!?!
    // GGF However, changing to the line below makes the model calculate pyramid heights that are too high.
// GGF SACLs->totalLat = (glbSync + (computeLat + setupCompLat) + (loadLat + storeLat + embeddedMemLat)) / pyramidHeight;
// Below is original Jiayuan calculation.
SACLs->totalLat = (glbSync + (numBlocks / numMPs) * (computeLat + setupCompLat) + (loadLat + storeLat + embeddedMemLat)) / pyramidHeight;
return SACLs->totalLat;
}
// Put all the values in an array so we can find the second best as well as the best.
// This is a real hack to avoid returning a list to the template, or having to sort.
static double minLats[1024];
static int validLats;
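// Scan the saved per-height latencies and return the pyramid height with the lowest
// modeled latency other than 'first' (i.e. the runner-up height).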
int getSecond(int first)
{
int retval = 1;
double minLat = 1e40;
for (int i = 1; i<=validLats; i++)
{
if (i == first) continue;
if (minLats[i] < minLat)
{
retval = i;
minLat = minLats[i];
}
}
return retval;
}
int calcMinLatencyInternal(SAProps_t * SAPs)
{
// Make a structure to catch the various latencies.
SAPs->SACLs = makeSACudaLats();
// The blocksize has already been determined.
int blockSize = SAPs->blockDims.x;
// initialize min to hugely big number.
double minLat = 1e40;
// Try all the possible pyramid heights.
// In theory there is a descent that can be followed to a valley.
    // But sometimes there is noise near the bottom of the valley.
    // And a descent will get a sub-optimal value.
    // Therefore, it currently calculates all heights and takes the minimum.
    // It is a trade-off between calculation time and optimal running time.
validLats = blockSize/2 - SAPs->haloDims.x;
// fprintf(stderr, "Valid PH=%d.\n", validLats);
for (int i=1; i<=validLats; i++)
{
double pyrUp = calcSACudaLats(SAPs, blockSize, i);
#ifdef STATISTICS
fprintf(stderr, "Model Pyramid Size=%d, Model Latency=%f\n", i, pyrUp);
#endif
// Store results to support hack to find second best.
minLats[i] = pyrUp;
// Model should not generate negative values.
// So, this is a safety check.
if (pyrUp < 0)
{
break;
}
// Remember minimum latency, and associated pyramid height.
if (pyrUp < minLat)
{
minLat = pyrUp;
SAPs->pyramidHeight = i;
}
// Reinstate below line to just do the descent to the valley floor.
// else break;
}
// fprintf(stderr, "BlockSize=%d, Pyramid=%d, Latency==%f\n", blockSize, SAPs->pyramidHeight, minLat);
return SAPs->pyramidHeight;
}
int calcPyramidHeight(dim3 grid_dims, unsigned int oneIterTime, unsigned int twoIterTime)
{
// Now we have enough information for the training period.
    // First call a kernel to make sure the device is fully initialized.
dummyKernel<<<grid_dims, SAPs->blockDims>>>();
long int ekTime = 0;
timeInMicroSeconds(ekTime, (dummyKernel<<<grid_dims, SAPs->blockDims>>>()));
// fprintf(stderr, "Empty Kernel time=%u.\n", ekTime);
// Convert micro-seconds into GPU cycles.
if (SAPs->CDPs->major >= 2) SAPs->globalSynchLat = ((double)ekTime)*SAPs->CDPs->clockRate/1000;
else SAPs->globalSynchLat = 3350;
    // Try setting it to Jiayuan's magic constant.
// SAPs->globalSynchLat = 3350;
// Try adding a bit to the synch time.
ekTime *= 2.0;
// fprintf(stderr, "One iter time=%u, Two iter time=%u.\n", oneIterTime, twoIterTime);
// Now let's calculate the various latencies.
long int oneIter = oneIterTime;
long int twoIter = twoIterTime;
// Remove the kernel call overhead.
oneIter -= ekTime;
twoIter -= ekTime;
// This is the rational calculation of the times for one iteration
// and for the setup time.
double fullTime = oneIter;
double setupTime = fullTime - (twoIter - oneIter);
// However, rational calculation does not work for small data sizes.
// THIS IS A TOTAL HACK!
// For some reason, we can't get good timings when things are small.
// if (setupTime <= ekTime) setupTime = 2 * ekTime;
// if (fullTime <= setupTime) fullTime = 2 * setupTime;
double magic = 1.25;
// if (SAPs->CDPs->major >= 2) magic = 3.0;
// else
if (SAPs->numDims == 2) magic = 3.0;
if (setupTime <= 0 || fullTime < magic * ekTime)
{
setupTime = MAX(setupTime, 1. * ekTime);
fullTime = MAX(fullTime, magic * setupTime);
fprintf(stderr, "changed setup and full.\n");
}
// Let's use the model to calculate the best height.
dim3 * dDims = &(SAPs->dataDims);
dim3 * bDims = &(SAPs->blockDims);
int WarpsPerBlock = div_ceil((bDims->x * bDims->y * bDims->z), SAPs->CDPs->warpSize);
int numOfWarps;
int numOfBlocks;
// setup was run with pyramid height of 1.
// Use div_ceil to match the grid sizes as calculated before the actual run was made.
numOfBlocks = iexp(div_ceil(dDims->x, bDims->x - 2*SAPs->haloDims.x), SAPs->numDims);
numOfWarps = numOfBlocks * WarpsPerBlock;
SAPs->setupInstrPerWarp = (((double)setupTime)*SAPs->CDPs->clockRate/1000)/numOfWarps;
// total was run with pyramid height of 2.
// But this makes the model worse for small data sizes.
// numOfBlocks = div_ceil(dDims->x, bDims->x - 4*SAPs->haloDims.x) * div_ceil(dDims->y, bDims->y - 4*SAPs->haloDims.y);
numOfWarps = numOfBlocks * WarpsPerBlock;
SAPs->totalInstrPerWarp = (((double)fullTime)*SAPs->CDPs->clockRate/1000)/numOfWarps;
// fprintf(stderr, "Total instructions per warp=%f.\n", SAPs->totalInstrPerWarp);
// fprintf(stderr, "Setup instructions per warp=%f.\n", SAPs->setupInstrPerWarp);
int ph = calcMinLatencyInternal(SAPs);
return ph;
}
|
23c10684ad1aae8fed7e017b378f3ac260ecbfaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include "coll_test.h"
__device__ double *pWrk;
__global__ void test_sync_call_kern(nvshmem_team_t team, int mype, double *d_time_avg, double *h_thread_lat,
double *h_warp_lat, double *h_block_lat) {
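    // Times nvshmem_team_sync from a single thread, then a single warp, then a whole block.
    // Per-PE cycle totals are combined with a sum reduction; PE 0 converts the
    // per-iteration, per-PE average from cycles to microseconds via clockrate.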
int iter = MAX_ITERS;
int skip = MAX_SKIP;
long long int start = 0, stop = 0;
double thread_usec, warp_usec, block_usec, time = 0;
int i;
double *dest_r, *source_r;
int PE_size = nvshmem_team_n_pes(team);
source_r = d_time_avg;
dest_r = (double *)((double *)d_time_avg + 1);
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !threadIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmem_team_sync(team);
if (i > skip) stop = clock64();
time += (stop - start);
}
nvshmem_barrier_all();
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
thread_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !(threadIdx.x / warpSize)) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_team_sync_warp(team);
if (i > skip) stop = clock64();
time += (stop - start);
}
nvshmemx_barrier_all_warp();
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
warp_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_team_sync_block(team);
if (i > skip) stop = clock64();
time += (stop - start);
}
nvshmemx_barrier_all_block();
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
block_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x && !threadIdx.x) nvshmem_barrier_all();
if (!threadIdx.x && !blockIdx.x && !mype) {
*h_thread_lat = thread_usec;
*h_warp_lat = warp_usec;
*h_block_lat = block_usec;
}
}
__global__ void test_sync_all_call_kern(nvshmem_team_t team, int mype, double *d_time_avg, double *h_thread_lat,
double *h_warp_lat, double *h_block_lat) {
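    // Same measurement pattern as test_sync_call_kern above, but timing the
    // nvshmem_sync_all / nvshmemx_sync_all_warp / nvshmemx_sync_all_block variants.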
int iter = MAX_ITERS;
int skip = MAX_SKIP;
long long int start = 0, stop = 0;
double thread_usec, warp_usec, block_usec, time = 0;
int i;
double *dest_r, *source_r;
int PE_size = nvshmem_team_n_pes(team);
source_r = d_time_avg;
dest_r = (double *)((double *)d_time_avg + 1);
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !threadIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmem_sync_all();
if (i > skip) stop = clock64();
time += (stop - start);
}
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
thread_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !(threadIdx.x / warpSize)) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_sync_all_warp();
if (i > skip) stop = clock64();
time += (stop - start);
}
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
warp_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_sync_all_block();
if (i > skip) stop = clock64();
time += (stop - start);
}
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
block_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x && !threadIdx.x) nvshmem_barrier_all();
if (!threadIdx.x && !blockIdx.x && !mype) {
*h_thread_lat = thread_usec;
*h_warp_lat = warp_usec;
*h_block_lat = block_usec;
}
}
int sync_calling_kernel(nvshmem_team_t team, hipStream_t stream, int mype, double *d_time_avg, void **h_tables) {
int status = 0;
int nvshm_test_num_tpb = TEST_NUM_TPB_BLOCK;
int num_blocks = 1;
double *h_thread_lat = (double *)h_tables[0];
double *h_warp_lat = (double *)h_tables[1];
double *h_block_lat = (double *)h_tables[2];
uint64_t num_tpb = TEST_NUM_TPB_BLOCK;
nvshmem_barrier_all();
hipLaunchKernelGGL(( test_sync_call_kern), dim3(num_blocks), dim3(nvshm_test_num_tpb), 0, stream, team, mype, d_time_avg,
h_thread_lat, h_warp_lat, h_block_lat);
cuda_check_error();
CUDA_CHECK(hipStreamSynchronize(stream));
if (!mype) {
print_table("sync_device", "thread", "threads per block", "latency", "us", '-', &num_tpb, h_thread_lat, 1);
print_table("sync_device", "warp", "threads per block", "latency", "us", '-', &num_tpb, h_warp_lat, 1);
print_table("sync_device", "block", "threads per block", "latency", "us", '-', &num_tpb, h_block_lat, 1);
}
nvshmem_barrier_all();
hipLaunchKernelGGL(( test_sync_all_call_kern), dim3(num_blocks), dim3(nvshm_test_num_tpb), 0, stream, team, mype, d_time_avg,
h_thread_lat, h_warp_lat, h_block_lat);
cuda_check_error();
CUDA_CHECK(hipStreamSynchronize(stream));
if (!mype) {
print_table("sync_all_device", "thread", "threads per block", "latency", "us", '-', &num_tpb, h_thread_lat, 1);
print_table("sync_all_device", "warp", "threads per block", "latency", "us", '-', &num_tpb, h_warp_lat, 1);
print_table("sync_all_device", "block", "threads per block", "latency", "us", '-', &num_tpb, h_block_lat, 1);
}
return status;
}
int main(int argc, char **argv) {
int mype;
double *d_time_avg;
hipStream_t cstrm;
void **h_tables;
init_wrapper(&argc, &argv);
alloc_tables(&h_tables, 3, 1);
mype = nvshmem_my_pe();
CUDA_CHECK(hipStreamCreateWithFlags(&cstrm, hipStreamNonBlocking));
d_time_avg = (double *)nvshmem_malloc(sizeof(double) * 2);
sync_calling_kernel(NVSHMEM_TEAM_WORLD, cstrm, mype, d_time_avg, h_tables);
nvshmem_barrier_all();
nvshmem_free(d_time_avg);
CUDA_CHECK(hipStreamDestroy(cstrm));
free_tables(h_tables, 3);
finalize_wrapper();
return 0;
}
| 23c10684ad1aae8fed7e017b378f3ac260ecbfaa.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include "coll_test.h"
__device__ double *pWrk;
__global__ void test_sync_call_kern(nvshmem_team_t team, int mype, double *d_time_avg, double *h_thread_lat,
double *h_warp_lat, double *h_block_lat) {
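    // Times nvshmem_team_sync from a single thread, then a single warp, then a whole block.
    // Per-PE cycle totals are combined with a sum reduction; PE 0 converts the
    // per-iteration, per-PE average from cycles to microseconds via clockrate.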
int iter = MAX_ITERS;
int skip = MAX_SKIP;
long long int start = 0, stop = 0;
double thread_usec, warp_usec, block_usec, time = 0;
int i;
double *dest_r, *source_r;
int PE_size = nvshmem_team_n_pes(team);
source_r = d_time_avg;
dest_r = (double *)((double *)d_time_avg + 1);
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !threadIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmem_team_sync(team);
if (i > skip) stop = clock64();
time += (stop - start);
}
nvshmem_barrier_all();
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
thread_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !(threadIdx.x / warpSize)) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_team_sync_warp(team);
if (i > skip) stop = clock64();
time += (stop - start);
}
nvshmemx_barrier_all_warp();
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
warp_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_team_sync_block(team);
if (i > skip) stop = clock64();
time += (stop - start);
}
nvshmemx_barrier_all_block();
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
block_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x && !threadIdx.x) nvshmem_barrier_all();
if (!threadIdx.x && !blockIdx.x && !mype) {
*h_thread_lat = thread_usec;
*h_warp_lat = warp_usec;
*h_block_lat = block_usec;
}
}
__global__ void test_sync_all_call_kern(nvshmem_team_t team, int mype, double *d_time_avg, double *h_thread_lat,
double *h_warp_lat, double *h_block_lat) {
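    // Same measurement pattern as test_sync_call_kern above, but timing the
    // nvshmem_sync_all / nvshmemx_sync_all_warp / nvshmemx_sync_all_block variants.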
int iter = MAX_ITERS;
int skip = MAX_SKIP;
long long int start = 0, stop = 0;
double thread_usec, warp_usec, block_usec, time = 0;
int i;
double *dest_r, *source_r;
int PE_size = nvshmem_team_n_pes(team);
source_r = d_time_avg;
dest_r = (double *)((double *)d_time_avg + 1);
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !threadIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmem_sync_all();
if (i > skip) stop = clock64();
time += (stop - start);
}
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
thread_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x && !(threadIdx.x / warpSize)) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_sync_all_warp();
if (i > skip) stop = clock64();
time += (stop - start);
}
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
warp_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x) nvshmemx_barrier_all_block();
time = 0;
if (!blockIdx.x) {
for (i = 0; i < (iter + skip); i++) {
if (i > skip) start = clock64();
nvshmemx_sync_all_block();
if (i > skip) stop = clock64();
time += (stop - start);
}
if (!threadIdx.x) {
*source_r = time;
nvshmem_double_sum_reduce(team, dest_r, source_r, 1);
time = *dest_r;
if (mype == 0) {
time = time / iter;
time = time / PE_size;
block_usec = time * 1000 / clockrate;
}
}
}
__syncthreads();
if (!blockIdx.x && !threadIdx.x) nvshmem_barrier_all();
if (!threadIdx.x && !blockIdx.x && !mype) {
*h_thread_lat = thread_usec;
*h_warp_lat = warp_usec;
*h_block_lat = block_usec;
}
}
int sync_calling_kernel(nvshmem_team_t team, cudaStream_t stream, int mype, double *d_time_avg, void **h_tables) {
int status = 0;
int nvshm_test_num_tpb = TEST_NUM_TPB_BLOCK;
int num_blocks = 1;
double *h_thread_lat = (double *)h_tables[0];
double *h_warp_lat = (double *)h_tables[1];
double *h_block_lat = (double *)h_tables[2];
uint64_t num_tpb = TEST_NUM_TPB_BLOCK;
nvshmem_barrier_all();
test_sync_call_kern<<<num_blocks, nvshm_test_num_tpb, 0, stream>>>(team, mype, d_time_avg,
h_thread_lat, h_warp_lat, h_block_lat);
cuda_check_error();
CUDA_CHECK(cudaStreamSynchronize(stream));
if (!mype) {
print_table("sync_device", "thread", "threads per block", "latency", "us", '-', &num_tpb, h_thread_lat, 1);
print_table("sync_device", "warp", "threads per block", "latency", "us", '-', &num_tpb, h_warp_lat, 1);
print_table("sync_device", "block", "threads per block", "latency", "us", '-', &num_tpb, h_block_lat, 1);
}
nvshmem_barrier_all();
test_sync_all_call_kern<<<num_blocks, nvshm_test_num_tpb, 0, stream>>>(team, mype, d_time_avg,
h_thread_lat, h_warp_lat, h_block_lat);
cuda_check_error();
CUDA_CHECK(cudaStreamSynchronize(stream));
if (!mype) {
print_table("sync_all_device", "thread", "threads per block", "latency", "us", '-', &num_tpb, h_thread_lat, 1);
print_table("sync_all_device", "warp", "threads per block", "latency", "us", '-', &num_tpb, h_warp_lat, 1);
print_table("sync_all_device", "block", "threads per block", "latency", "us", '-', &num_tpb, h_block_lat, 1);
}
return status;
}
int main(int argc, char **argv) {
int mype;
double *d_time_avg;
cudaStream_t cstrm;
void **h_tables;
init_wrapper(&argc, &argv);
alloc_tables(&h_tables, 3, 1);
mype = nvshmem_my_pe();
CUDA_CHECK(cudaStreamCreateWithFlags(&cstrm, cudaStreamNonBlocking));
d_time_avg = (double *)nvshmem_malloc(sizeof(double) * 2);
sync_calling_kernel(NVSHMEM_TEAM_WORLD, cstrm, mype, d_time_avg, h_tables);
nvshmem_barrier_all();
nvshmem_free(d_time_avg);
CUDA_CHECK(cudaStreamDestroy(cstrm));
free_tables(h_tables, 3);
finalize_wrapper();
return 0;
}
|
6c49eb6bf2ee6d1ba796f1e86e8a76a8dc5a611f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <iostream>
#include <time.h>
#include "gpu-new-forward.h"
#define TILE_WIDTH 16
__global__ void conv_forward_kernel(float *y, const float * __restrict__ x, const float * __restrict__ k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
    Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
const int W_grid = ceil(1.*W_out/TILE_WIDTH);
const int H_grid = ceil(1.*H_out/TILE_WIDTH);
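    // Grid mapping: blockIdx.x selects the image in the batch, blockIdx.y the output
    // feature map, and blockIdx.z the output tile (row = z / W_grid, column = z % W_grid).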
int n = blockIdx.x;
int m = blockIdx.y;
int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
// out of bounds
if (h < H_out && w < W_out) {
float acc = 0.0f;
#pragma unroll
for (int c = 0; c < C; c++) { // sum over all input feature maps
#pragma unroll 7
for (int p = 0; p < K; p++) {
// KxK filter
#pragma unroll 7
for (int q = 0; q < K; q++) {
acc += x4d(n, c, h + p, w + q) * k4d(m, c, p, q);
}
}
}
y4d(n, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
__host__ void GPUInterface::conv_forward_gpu1(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Declare relevant device pointers
float* device_y;
float* device_x;
float* device_k;
// Allocate memory and copy over the relevant data structures to the GPU
hipMalloc((void**) &device_y, H_out * W_out * M * B * sizeof(float));
hipMalloc((void**) &device_x, H * W * C * B * sizeof(float));
hipMalloc((void**) &device_k, K * K * M * C * sizeof(float));
hipMemcpy(device_x, host_x, H * W * C * B * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_k, host_k, K * K * M * C * sizeof(float), hipMemcpyHostToDevice);
// Set the kernel dimensions and call the kernel
int W_grid = ceil(W_out/(1.0 * TILE_WIDTH)); // number of horizontal tiles per output map
int H_grid = ceil(H_out/(1.0 * TILE_WIDTH)); // number of vertical tiles per output map
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(B, M, Z);
hipLaunchKernelGGL(( conv_forward_kernel), dim3(gridDim), dim3(blockDim), 0, 0, device_y,device_x,device_k,B,M,C,H,W,K);
// Copy the output back to host
hipMemcpy(host_y, device_y, H_out * W_out * M * B * sizeof(float), hipMemcpyDeviceToHost);
// Free device memory
hipFree(device_x);
hipFree(device_y);
hipFree(device_k);
// Useful snippet for error checking
// hipError_t error = hipGetLastError();
// if(error != hipSuccess)
// {
// std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl;
// exit(-1);
// }
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Number of multiprocessors: "<<deviceProp.multiProcessorCount<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max Registers per Block: "<<deviceProp.regsPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
}
| 6c49eb6bf2ee6d1ba796f1e86e8a76a8dc5a611f.cu | #include <cmath>
#include <iostream>
#include <time.h>
#include "gpu-new-forward.h"
#define TILE_WIDTH 16
__global__ void conv_forward_kernel(float *y, const float * __restrict__ x, const float * __restrict__ k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch
The goal here is to be correct AND fast.
    Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
const int W_grid = ceil(1.*W_out/TILE_WIDTH);
const int H_grid = ceil(1.*H_out/TILE_WIDTH);
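    // Grid mapping: blockIdx.x selects the image in the batch, blockIdx.y the output
    // feature map, and blockIdx.z the output tile (row = z / W_grid, column = z % W_grid).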
int n = blockIdx.x;
int m = blockIdx.y;
int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
// out of bounds
if (h < H_out && w < W_out) {
float acc = 0.0f;
#pragma unroll
for (int c = 0; c < C; c++) { // sum over all input feature maps
#pragma unroll 7
for (int p = 0; p < K; p++) {
// KxK filter
#pragma unroll 7
for (int q = 0; q < K; q++) {
acc += x4d(n, c, h + p, w + q) * k4d(m, c, p, q);
}
}
}
y4d(n, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
__host__ void GPUInterface::conv_forward_gpu1(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Declare relevant device pointers
float* device_y;
float* device_x;
float* device_k;
// Allocate memory and copy over the relevant data structures to the GPU
cudaMalloc((void**) &device_y, H_out * W_out * M * B * sizeof(float));
cudaMalloc((void**) &device_x, H * W * C * B * sizeof(float));
cudaMalloc((void**) &device_k, K * K * M * C * sizeof(float));
cudaMemcpy(device_x, host_x, H * W * C * B * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_k, host_k, K * K * M * C * sizeof(float), cudaMemcpyHostToDevice);
// Set the kernel dimensions and call the kernel
int W_grid = ceil(W_out/(1.0 * TILE_WIDTH)); // number of horizontal tiles per output map
int H_grid = ceil(H_out/(1.0 * TILE_WIDTH)); // number of vertical tiles per output map
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(B, M, Z);
conv_forward_kernel<<<gridDim, blockDim>>>(device_y,device_x,device_k,B,M,C,H,W,K);
// Copy the output back to host
cudaMemcpy(host_y, device_y, H_out * W_out * M * B * sizeof(float), cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_k);
// Useful snippet for error checking
// cudaError_t error = cudaGetLastError();
// if(error != cudaSuccess)
// {
// std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl;
// exit(-1);
// }
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Number of multiprocessors: "<<deviceProp.multiProcessorCount<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max Registers per Block: "<<deviceProp.regsPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
}
|
a6d93cf4e04f9e0f4fa0566c8cedb071ccff5396.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RayTracing.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "SphereCreator.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rayTracing(uchar4* ptrDevPixels,int nbspheres, Sphere* ptrDevTabSphere,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
RayTracing::RayTracing(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_Luy")
{
// Time
this->dt = dt;
// Inputs
this->t = 0; // protected in Animable
this->nbSphere = 30;
this->sizeOctet = sizeof(Sphere)*nbSphere;
SphereCreator sphereCreator(nbSphere, w, h);
Sphere* ptrTabSphere = sphereCreator.getTabSphere();
this->toGM(ptrTabSphere);
}
RayTracing::~RayTracing()
{
// nothing to do
}
/*-------------------------*\
|* Method *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used here because this view is not zoomable
*/
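// Note: dg and db used in the launch below are assumed to be the grid and block
// dimensions set up by the Animable_I base class (they are not declared in this file).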
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
Device::lastCudaError("rayTracing rgba uchar4 (before)"); // facultatif, for debug only, remove for release
hipLaunchKernelGGL((rayTracing), dim3(dg), dim3(db), 0, 0, ptrDevPixels, this->nbSphere, this->ptrDevTabSphere, w, h, t);
Device::lastCudaError("rayTracing rgba uchar4 (after)"); // facultatif, for debug only, remove for release
}
/**
* Override
* Called periodically by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
void RayTracing::toGM(Sphere* ptrTabSphere)
{
Device::memclear(&ptrDevTabSphere, sizeOctet);
Device::malloc(&ptrDevTabSphere, sizeOctet);
Device::memcpyHToD(ptrDevTabSphere, ptrTabSphere, sizeOctet);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| a6d93cf4e04f9e0f4fa0566c8cedb071ccff5396.cu | #include "RayTracing.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "SphereCreator.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rayTracing(uchar4* ptrDevPixels,int nbspheres, Sphere* ptrDevTabSphere,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
RayTracing::RayTracing(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_Luy")
{
// Time
this->dt = dt;
// Inputs
this->t = 0; // protected in Animable
this->nbSphere = 30;
this->sizeOctet = sizeof(Sphere)*nbSphere;
SphereCreator sphereCreator(nbSphere, w, h);
Sphere* ptrTabSphere = sphereCreator.getTabSphere();
this->toGM(ptrTabSphere);
}
RayTracing::~RayTracing()
{
// nothing to do
}
/*-------------------------*\
|* Method *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used here because this view is not zoomable
*/
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
Device::lastCudaError("rayTracing rgba uchar4 (before)"); // facultatif, for debug only, remove for release
rayTracing<<<dg,db>>>(ptrDevPixels, this->nbSphere, this->ptrDevTabSphere, w, h, t);
Device::lastCudaError("rayTracing rgba uchar4 (after)"); // facultatif, for debug only, remove for release
}
/**
* Override
* Called periodically by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
void RayTracing::toGM(Sphere* ptrTabSphere)
{
Device::memclear(&ptrDevTabSphere, sizeOctet);
Device::malloc(&ptrDevTabSphere, sizeOctet);
Device::memcpyHToD(ptrDevTabSphere, ptrTabSphere, sizeOctet);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
7e41cc237444df316c8dfcdd6864b1f475ff1289.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "idwt_per_Y_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
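// The benchmark below sweeps every (block size, matrix size) pair defined above: each
// configuration gets a 10-launch warm-up, then 1000 timed launches of idwt_per_Y_1.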
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_dst = NULL;
hipMalloc(&d_dst, XSIZE*YSIZE*sizeof(float));
float *src_A = NULL;
hipMalloc(&src_A, XSIZE*YSIZE*sizeof(float));
float *src_D = NULL;
hipMalloc(&src_D, XSIZE*YSIZE*sizeof(float));
int rows = XSIZE;
int cols = YSIZE;
int next_rows = 1;
int filt_len = 1;
int halo = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((idwt_per_Y_1), dim3(gridBlock), dim3(threadBlock), 0, 0, d_dst,src_A,src_D,rows,cols,next_rows,filt_len,halo);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((idwt_per_Y_1), dim3(gridBlock), dim3(threadBlock), 0, 0, d_dst,src_A,src_D,rows,cols,next_rows,filt_len,halo);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((idwt_per_Y_1), dim3(gridBlock), dim3(threadBlock), 0, 0, d_dst,src_A,src_D,rows,cols,next_rows,filt_len,halo);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7e41cc237444df316c8dfcdd6864b1f475ff1289.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "idwt_per_Y_1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_dst = NULL;
cudaMalloc(&d_dst, XSIZE*YSIZE*sizeof(float));
float *src_A = NULL;
cudaMalloc(&src_A, XSIZE*YSIZE*sizeof(float));
float *src_D = NULL;
cudaMalloc(&src_D, XSIZE*YSIZE*sizeof(float));
int rows = XSIZE;
int cols = YSIZE;
int next_rows = 1;
int filt_len = 1;
int halo = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
idwt_per_Y_1<<<gridBlock,threadBlock>>>(d_dst,src_A,src_D,rows,cols,next_rows,filt_len,halo);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
idwt_per_Y_1<<<gridBlock,threadBlock>>>(d_dst,src_A,src_D,rows,cols,next_rows,filt_len,halo);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
idwt_per_Y_1<<<gridBlock,threadBlock>>>(d_dst,src_A,src_D,rows,cols,next_rows,filt_len,halo);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6cfb9293f826ee6f7de7f58b64776ba4c8065722.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include <fstream> // std::ofstream used by write()
// includes thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
using namespace thrust::placeholders;
////////////////////////////////////////////////////////////////////////////////
// Inline functions
////////////////////////////////////////////////////////////////////////////////
inline __device__ float calculateDistanceSquared(
float x1, float y1, float z1,
float x2, float y2, float z2) {
return (x1 - x2)*(x1 - x2) +
(y1 - y2)*(y1 - y2) +
(z1 - z2)*(z1 - z2);
}
inline float random_float(){
return (float)rand()/(float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// Kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void
calculateDistances(float *points_x, float* points_y, float* points_z,
float *centroid_x, float* centroid_y, float* centroid_z,
float *data_x, float* data_y, float* data_z,
int *closest, int k, int n)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
unsigned index_min;
float min_distance = 10e9;
for(int i = 0; i < k; i++){
float distance = calculateDistanceSquared(
points_x[tid], points_y[tid], points_z[tid],
centroid_x[i], centroid_y[i], centroid_z[i]);
if(min_distance > distance){
index_min = i;
min_distance = distance;
}
}
data_x[tid + index_min * n] = points_x[tid];
data_y[tid + index_min * n] = points_y[tid];
data_z[tid + index_min * n] = points_z[tid];
closest[tid + index_min * n] = 1;
}
}
__global__ void reduce(
float *centroid_x, float* centroid_y, float* centroid_z,
float *data_x, float* data_y, float* data_z,
int *closest, int *sums, int k, int n) {
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k * n){
if(closest[tid] != 0){
int centroid_num = tid / n;
atomicAdd(&centroid_x[centroid_num], data_x[tid]);
atomicAdd(&centroid_y[centroid_num], data_y[tid]);
atomicAdd(&centroid_z[centroid_num], data_z[tid]);
atomicAdd(&sums[centroid_num], 1);
}
}
}
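// reduce above: every (centroid, point) slot flagged in `closest` adds its point's
// coordinates to that centroid's running sums via atomicAdd and bumps the per-centroid
// count in `sums`; calculateMean below turns those sums into the new centroids.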
__global__ void
calculateMean(float *centroid_x, float* centroid_y, float* centroid_z, int *sums, int k)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k){
int sum = sums[tid];
if(sum != 0){
centroid_x[tid] /= sums[tid];
centroid_y[tid] /= sums[tid];
centroid_z[tid] /= sums[tid];
} else {
centroid_x[tid] = 0.0f;
centroid_y[tid] = 0.0f;
centroid_z[tid] = 0.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Function headers
////////////////////////////////////////////////////////////////////////////////
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n);
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon);
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
srand(0);
//setup parameters
int k = 50, n = 32000;
float epsilon = 0.0001f;
//initialize host vectors
thrust::host_vector<float> h_points_x(n), h_points_y(n), h_points_z(n);
thrust::host_vector<float> h_centroids_x(k), h_centroids_y(k), h_centroids_z(k);
//generate data
randomize(h_points_x, h_points_y, h_points_z, h_centroids_x, h_centroids_y, h_centroids_z, k, n);
//initialize device vectors, copy data from host vectors
thrust::device_vector<float> d_points_x(h_points_x), d_points_y(h_points_y), d_points_z(h_points_z);
thrust::device_vector<float> d_centroids_x = h_centroids_x, d_centroids_y = h_centroids_y, d_centroids_z = h_centroids_z;
thrust::device_vector<float> d_data_x(k*n), d_data_y(k*n), d_data_z(k*n);
thrust::device_vector<int> d_closest(k*n);
thrust::device_vector<int> d_sums(k);
//start timers
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// setup execution parameters
dim3 grid(n / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
dim3 grid2(k / 1024 + 1, 1, 1);
dim3 threads2(1024, 1, 1);
dim3 grid3((k * n) / 1024 + 1, 1, 1);
dim3 threads3(1024, 1, 1);
int iter = 0;
do {
//clear data
thrust::fill(d_data_x.begin(), d_data_x.end(), 0.0f);
thrust::fill(d_data_y.begin(), d_data_y.end(), 0.0f);
thrust::fill(d_data_z.begin(), d_data_z.end(), 0.0f);
thrust::fill(d_closest.begin(), d_closest.end(), 0);
//for each point in data set find closest centroid
hipLaunchKernelGGL(( calculateDistances), dim3(grid), dim3(threads) , 0, 0,
thrust::raw_pointer_cast(&d_points_x[0]),
thrust::raw_pointer_cast(&d_points_y[0]),
thrust::raw_pointer_cast(&d_points_z[0]),
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_data_x[0]),
thrust::raw_pointer_cast(&d_data_y[0]),
thrust::raw_pointer_cast(&d_data_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
k, n);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//clear old centroids data
thrust::fill(d_centroids_x.begin(), d_centroids_x.end(), 0.0f);
thrust::fill(d_centroids_y.begin(), d_centroids_y.end(), 0.0f);
thrust::fill(d_centroids_z.begin(), d_centroids_z.end(), 0.0f);
thrust::fill(d_sums.begin(), d_sums.end(), 0);
//sum up, for each centroid, the coordinates (and count) of the points assigned to it
hipLaunchKernelGGL(( reduce), dim3(grid3), dim3(threads3) , 0, 0,
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_data_x[0]),
thrust::raw_pointer_cast(&d_data_y[0]),
thrust::raw_pointer_cast(&d_data_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
thrust::raw_pointer_cast(&d_sums[0]),
k, n);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//now calculate the mean from the previously accumulated sums; this gives the new centroids
hipLaunchKernelGGL(( calculateMean), dim3(grid2), dim3(threads2) , 0, 0,
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_sums[0]), k);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//one iteration done
iter = iter + 1;
} while(
//check if change is small compared to the last iteration
!stop(h_centroids_x, h_centroids_y, h_centroids_z,
d_centroids_x, d_centroids_y, d_centroids_z,
k, epsilon) && iter < 100); // stop when converged or after 100 iterations
//stop timers and print summary
sdkStopTimer(&timer);
printf("Processing time: %f (ms), %d iterations\n", sdkGetTimerValue(&timer), iter);
sdkDeleteTimer(&timer);
//write output of the program to a file
write(h_points_x, h_points_y, h_points_z, n, "points.txt");
write(h_centroids_x, h_centroids_y, h_centroids_z, k, "centroids.txt");
printf("Exiting...\n");
exit(EXIT_SUCCESS);
}
//generate data
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n){
for(int i = 0; i < k; i++){
float x = random_float();
float y = random_float();
float z = random_float();
centroid_x[i] = x;
centroid_y[i] = y;
centroid_z[i] = z;
}
for(int i = 0; i < n; i++){
float x = random_float();
float y = random_float();
float z = random_float();
point_x[i] = x;
point_y[i] = y;
point_z[i] = z;
}
}
//check if the algorithm should stop, i.e. if the mean absolute change of the centroids
//is less than the given epsilon
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon){
thrust::host_vector<float>
h_centroid_x_new(d_centroid_x),
h_centroid_y_new(d_centroid_y),
h_centroid_z_new(d_centroid_z);
float norm = 0.0f;
for(int i = 0; i < k; i++){
norm += abs(h_centroid_x_new[i] - h_centroid_x[i]) +
abs(h_centroid_y_new[i] - h_centroid_y[i]) +
abs(h_centroid_z_new[i] - h_centroid_z[i]);
}
norm /= (k * 3);
h_centroid_x = h_centroid_x_new;
h_centroid_y = h_centroid_y_new;
h_centroid_z = h_centroid_z_new;
printf("norm: %f\n", norm);
if(norm > epsilon) return false;
else return true;
}
// writes vectors to a specified file
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename){
std::ofstream myfile;
myfile.open(filename);
for(int i = 0; i < n; i++){
myfile << h_x[i] << " " << h_y[i] << " " << h_z[i] << " " << std::endl;
}
myfile.close();
}
| 6cfb9293f826ee6f7de7f58b64776ba4c8065722.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include <fstream> // std::ofstream used by write()
// includes thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
using namespace thrust::placeholders;
////////////////////////////////////////////////////////////////////////////////
// Inline functions
////////////////////////////////////////////////////////////////////////////////
inline __device__ float calculateDistanceSquared(
float x1, float y1, float z1,
float x2, float y2, float z2) {
return (x1 - x2)*(x1 - x2) +
(y1 - y2)*(y1 - y2) +
(z1 - z2)*(z1 - z2);
}
inline float random_float(){
return (float)rand()/(float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// Kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void
calculateDistances(float *points_x, float* points_y, float* points_z,
float *centroid_x, float* centroid_y, float* centroid_z,
float *data_x, float* data_y, float* data_z,
int *closest, int k, int n)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < n){
unsigned index_min;
float min_distance = 10e9;
for(int i = 0; i < k; i++){
float distance = calculateDistanceSquared(
points_x[tid], points_y[tid], points_z[tid],
centroid_x[i], centroid_y[i], centroid_z[i]);
if(min_distance > distance){
index_min = i;
min_distance = distance;
}
}
data_x[tid + index_min * n] = points_x[tid];
data_y[tid + index_min * n] = points_y[tid];
data_z[tid + index_min * n] = points_z[tid];
closest[tid + index_min * n] = 1;
}
}
__global__ void reduce(
float *centroid_x, float* centroid_y, float* centroid_z,
float *data_x, float* data_y, float* data_z,
int *closest, int *sums, int k, int n) {
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k * n){
if(closest[tid] != 0){
int centroid_num = tid / n;
atomicAdd(&centroid_x[centroid_num], data_x[tid]);
atomicAdd(&centroid_y[centroid_num], data_y[tid]);
atomicAdd(&centroid_z[centroid_num], data_z[tid]);
atomicAdd(&sums[centroid_num], 1);
}
}
}
__global__ void
calculateMean(float *centroid_x, float* centroid_y, float* centroid_z, int *sums, int k)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k){
int sum = sums[tid];
if(sum != 0){
centroid_x[tid] /= sums[tid];
centroid_y[tid] /= sums[tid];
centroid_z[tid] /= sums[tid];
} else {
centroid_x[tid] = 0.0f;
centroid_y[tid] = 0.0f;
centroid_z[tid] = 0.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Function headers
////////////////////////////////////////////////////////////////////////////////
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n);
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon);
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
srand(0);
//setup parameters
int k = 50, n = 32000;
float epsilon = 0.0001f;
//initialize host vectors
thrust::host_vector<float> h_points_x(n), h_points_y(n), h_points_z(n);
thrust::host_vector<float> h_centroids_x(k), h_centroids_y(k), h_centroids_z(k);
//generate data
randomize(h_points_x, h_points_y, h_points_z, h_centroids_x, h_centroids_y, h_centroids_z, k, n);
//initialize device vectors, copy data from host vectors
thrust::device_vector<float> d_points_x(h_points_x), d_points_y(h_points_y), d_points_z(h_points_z);
thrust::device_vector<float> d_centroids_x = h_centroids_x, d_centroids_y = h_centroids_y, d_centroids_z = h_centroids_z;
thrust::device_vector<float> d_data_x(k*n), d_data_y(k*n), d_data_z(k*n);
thrust::device_vector<int> d_closest(k*n);
thrust::device_vector<int> d_sums(k);
//start timers
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// setup execution parameters
dim3 grid(n / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
dim3 grid2(k / 1024 + 1, 1, 1);
dim3 threads2(1024, 1, 1);
dim3 grid3((k * n) / 1024 + 1, 1, 1);
dim3 threads3(1024, 1, 1);
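// Three launch shapes are used below: grid/threads gives one thread per data point
// (calculateDistances), grid2/threads2 one thread per centroid (calculateMean), and
// grid3/threads3 one thread per (centroid, point) slot of the k*n scratch arrays (reduce).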
int iter = 0;
do {
//clear data
thrust::fill(d_data_x.begin(), d_data_x.end(), 0.0f);
thrust::fill(d_data_y.begin(), d_data_y.end(), 0.0f);
thrust::fill(d_data_z.begin(), d_data_z.end(), 0.0f);
thrust::fill(d_closest.begin(), d_closest.end(), 0);
//for each point in data set find closest centroid
calculateDistances<<< grid, threads >>>(
thrust::raw_pointer_cast(&d_points_x[0]),
thrust::raw_pointer_cast(&d_points_y[0]),
thrust::raw_pointer_cast(&d_points_z[0]),
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_data_x[0]),
thrust::raw_pointer_cast(&d_data_y[0]),
thrust::raw_pointer_cast(&d_data_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
k, n);
cudaDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//clear old centroids data
thrust::fill(d_centroids_x.begin(), d_centroids_x.end(), 0.0f);
thrust::fill(d_centroids_y.begin(), d_centroids_y.end(), 0.0f);
thrust::fill(d_centroids_z.begin(), d_centroids_z.end(), 0.0f);
thrust::fill(d_sums.begin(), d_sums.end(), 0);
//sum up, for each centroid, the coordinates (and count) of the points assigned to it
reduce<<< grid3, threads3 >>>(
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_data_x[0]),
thrust::raw_pointer_cast(&d_data_y[0]),
thrust::raw_pointer_cast(&d_data_z[0]),
thrust::raw_pointer_cast(&d_closest[0]),
thrust::raw_pointer_cast(&d_sums[0]),
k, n);
cudaDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//now calculate the mean from the previously accumulated sums; this gives the new centroids
calculateMean<<< grid2, threads2 >>>(
thrust::raw_pointer_cast(&d_centroids_x[0]),
thrust::raw_pointer_cast(&d_centroids_y[0]),
thrust::raw_pointer_cast(&d_centroids_z[0]),
thrust::raw_pointer_cast(&d_sums[0]), k);
cudaDeviceSynchronize();
getLastCudaError("Kernel execution failed");
//one iteration done
iter = iter + 1;
} while(
//check if change is small compared to the last iteration
!stop(h_centroids_x, h_centroids_y, h_centroids_z,
d_centroids_x, d_centroids_y, d_centroids_z,
k, epsilon) && iter < 100); // stop when converged or after 100 iterations
//stop timers and print summary
sdkStopTimer(&timer);
printf("Processing time: %f (ms), %d iterations\n", sdkGetTimerValue(&timer), iter);
sdkDeleteTimer(&timer);
//write output of the program to a file
write(h_points_x, h_points_y, h_points_z, n, "points.txt");
write(h_centroids_x, h_centroids_y, h_centroids_z, k, "centroids.txt");
printf("Exiting...\n");
exit(EXIT_SUCCESS);
}
//generate data
void randomize(thrust::host_vector<float>& point_x, thrust::host_vector<float>& point_y, thrust::host_vector<float>& point_z,
thrust::host_vector<float>& centroid_x, thrust::host_vector<float>& centroid_y, thrust::host_vector<float>& centroid_z,
int k, int n){
for(int i = 0; i < k; i++){
float x = random_float();
float y = random_float();
float z = random_float();
centroid_x[i] = x;
centroid_y[i] = y;
centroid_z[i] = z;
}
for(int i = 0; i < n; i++){
float x = random_float();
float y = random_float();
float z = random_float();
point_x[i] = x;
point_y[i] = y;
point_z[i] = z;
}
}
//check if the algorithm should stop, i.e. if the mean absolute change of the centroids
//is less than the given epsilon
bool stop(thrust::host_vector<float>& h_centroid_x, thrust::host_vector<float>& h_centroid_y, thrust::host_vector<float>& h_centroid_z,
thrust::device_vector<float>& d_centroid_x, thrust::device_vector<float>& d_centroid_y, thrust::device_vector<float>& d_centroid_z,
int k, float epsilon){
thrust::host_vector<float>
h_centroid_x_new(d_centroid_x),
h_centroid_y_new(d_centroid_y),
h_centroid_z_new(d_centroid_z);
float norm = 0.0f;
for(int i = 0; i < k; i++){
norm += abs(h_centroid_x_new[i] - h_centroid_x[i]) +
abs(h_centroid_y_new[i] - h_centroid_y[i]) +
abs(h_centroid_z_new[i] - h_centroid_z[i]);
}
norm /= (k * 3);
h_centroid_x = h_centroid_x_new;
h_centroid_y = h_centroid_y_new;
h_centroid_z = h_centroid_z_new;
printf("norm: %f\n", norm);
if(norm > epsilon) return false;
else return true;
}
// writes vectors to a specified file
void write(thrust::host_vector<float>& h_x, thrust::host_vector<float>& h_y, thrust::host_vector<float>& h_z, int n, const char* filename){
std::ofstream myfile;
myfile.open(filename);
for(int i = 0; i < n; i++){
myfile << h_x[i] << " " << h_y[i] << " " << h_z[i] << " " << std::endl;
}
myfile.close();
}
|
4a29fbd34ee37b91c9aa68eab7477e943487e561.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_methods.h"
#include <device_launch_parameters.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
__global__ void rgb2grayKernel(unsigned char *imgr,unsigned char *imgg,unsigned char *imgb,unsigned char *img_gray, int n) {
int r, g, b;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n){
r = imgr[index];
g = imgg[index];
b = imgb[index];
img_gray[index] = (unsigned char)( 0.299*r + 0.587*g + 0.114*b);
}
}
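// The 0.299/0.587/0.114 weights above are the standard ITU-R BT.601 luma coefficients.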
void rgb2gray(unsigned char *imgr, unsigned char *imgg, unsigned char *imgb, unsigned char *img_gray, int n){
unsigned char *imgr_cuda, *imgg_cuda, *imgb_cuda;
unsigned char *img_gray_cuda;
unsigned int nBytes = sizeof(unsigned char) * n;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
hipMalloc((void **)&imgr_cuda, nBytes);
hipMalloc((void **)&imgg_cuda, nBytes);
hipMalloc((void **)&imgb_cuda, nBytes);
hipMalloc((void **)&img_gray_cuda, nBytes);
hipMemcpy(imgr_cuda, imgr, nBytes, hipMemcpyHostToDevice);
hipMemcpy(imgg_cuda, imgg, nBytes, hipMemcpyHostToDevice);
hipMemcpy(imgb_cuda, imgb, nBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( rgb2grayKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, imgr_cuda, imgg_cuda, imgb_cuda, img_gray_cuda, n);
hipMemcpy(img_gray, img_gray_cuda, nBytes, hipMemcpyDeviceToHost);
hipFree(imgr_cuda);
hipFree(imgg_cuda);
hipFree(imgb_cuda);
hipFree(img_gray_cuda);
}
| 4a29fbd34ee37b91c9aa68eab7477e943487e561.cu | #include "cuda_methods.h"
#include <device_launch_parameters.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
__global__ void rgb2grayKernel(unsigned char *imgr,unsigned char *imgg,unsigned char *imgb,unsigned char *img_gray, int n) {
int r, g, b;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n){
r = imgr[index];
g = imgg[index];
b = imgb[index];
img_gray[index] = (unsigned char)( 0.299*r + 0.587*g + 0.114*b);
}
}
void rgb2gray(unsigned char *imgr, unsigned char *imgg, unsigned char *imgb, unsigned char *img_gray, int n){
unsigned char *imgr_cuda, *imgg_cuda, *imgb_cuda;
unsigned char *img_gray_cuda;
unsigned int nBytes = sizeof(unsigned char) * n;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
cudaMalloc((void **)&imgr_cuda, nBytes);
cudaMalloc((void **)&imgg_cuda, nBytes);
cudaMalloc((void **)&imgb_cuda, nBytes);
cudaMalloc((void **)&img_gray_cuda, nBytes);
cudaMemcpy(imgr_cuda, imgr, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(imgg_cuda, imgg, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(imgb_cuda, imgb, nBytes, cudaMemcpyHostToDevice);
rgb2grayKernel<<<blocksPerGrid,threadsPerBlock>>>(imgr_cuda, imgg_cuda, imgb_cuda, img_gray_cuda, n);
cudaMemcpy(img_gray, img_gray_cuda, nBytes, cudaMemcpyDeviceToHost);
cudaFree(imgr_cuda);
cudaFree(imgg_cuda);
cudaFree(imgb_cuda);
cudaFree(img_gray_cuda);
}
|
a68a9d271a7253f44dde963dab5481ea8c78b9a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* To compile:
* nvcc -o lr_cuda lr_cuda.cu -lm
*
* To run:
* ./lr_cuda
*****************************************************************************/
typedef struct points_t {
double x;
double y;
} points_t;
int ndata = 1000;
__device__ int d_n_data = 1000;
points_t data[] = {
{87.72,160.36},{79.77,154.56},{74.88,144.94},{67.05,157.02},
{73.75,121.36},{69.58,132.51},{67.21,143.57},{72.13,141.31},
{87.64,144.77},{65.42,146.46},{83.09,144.82},{73.30,137.10},
{76.75,151.67},{69.17,144.50},{87.61,171.62},{83.56,164.97},
{75.57,137.55},{73.80,156.25},{75.75,130.90},{97.13,193.14},
{ 6.54,74.00},{54.31,100.05},{57.36,127.37},{93.54,172.18},
{98.26,190.44},{87.27,149.69},{66.44,143.27},{98.24,171.56},
{27.74,98.27},{10.51,57.95},{88.17,179.14},{22.66,63.64},
{30.46,64.81},{65.24,135.15},{ 1.71,39.30},{91.29,171.76},
{77.43,153.66},{68.29,132.07},{33.43,82.08},{60.40,119.17},
{15.86,64.98},{61.96,126.18},{11.05,58.97},{23.99,67.74},
{21.13,65.78},{34.79,97.87},{22.32,74.86},{78.29,136.48},
{25.38,84.45},{94.49,169.91},{35.62,99.82},{58.20,127.53},
{98.37,182.22},{93.79,168.74},{26.98,91.60},{30.55,103.77},
{ 5.86,44.79},{78.96,159.17},{93.19,158.82},{24.73,51.92},
{94.91,186.12},{61.84,135.07},{ 2.60,37.48},{95.93,189.32},
{95.39,157.13},{20.24,69.22},{29.93,71.36},{68.29,148.83},
{36.68,92.16},{93.29,180.89},{30.53,75.37},{48.10,120.04},
{83.17,153.60},{ 8.22,50.63},{ 8.76,50.80},{97.71,171.61},
{13.28,46.71},{ 1.07,43.74},{11.16,71.21},{20.98,78.43},
{21.89,50.58},{11.51,55.46},{ 3.22,43.94},{41.33,86.40},
{ 9.72,66.88},{10.91,51.56},{40.75,100.96},{81.92,161.87},
{54.44,112.70},{11.35,62.04},{69.49,123.86},{50.51,96.47},
{74.84,150.85},{77.35,150.52},{81.72,163.59},{11.65,37.53},
{13.48,52.63},{79.02,166.24},{84.93,171.49},{59.13,117.54},
{70.14,129.94},{58.90,118.01},{28.64,107.03},{43.71,112.86},
{58.86,114.54},{78.67,160.78},{94.74,171.99},{70.31,122.40},
{16.20,74.80},{58.59,110.98},{70.81,130.09},{ 7.86,46.69},
{91.46,153.64},{34.05,87.81},{24.06,68.39},{66.23,134.24},
{58.69,121.12},{ 0.18,38.19},{20.48,63.34},{63.79,125.30},
{27.38,80.17},{39.12,100.73},{85.45,151.11},{48.89,92.37},
{81.54,155.19},{20.52,55.22},{16.33,56.39},{82.69,158.63},
{78.03,160.98},{ 3.36,62.70},{ 9.16,36.34},{ 5.56,50.25},
{96.94,185.14},{46.54,107.56},{13.32,54.76},{21.63,58.16},
{35.36,86.17},{92.63,175.36},{35.77,93.56},{24.99,73.63},
{78.27,153.33},{44.20,89.01},{ 8.41,28.53},{22.39,65.89},
{ 4.01,63.33},{61.57,131.79},{39.85,95.92},{76.59,132.38},
{49.60,117.48},{54.00,114.01},{92.20,160.82},{98.73,180.85},
{56.18,108.40},{75.10,144.54},{36.91,105.85},{36.52,75.99},
{47.65,79.93},{72.52,151.63},{50.78,112.17},{85.76,151.27},
{14.41,51.07},{48.66,109.44},{50.39,104.20},{95.63,174.36},
{69.74,140.10},{25.53,87.75},{67.15,141.26},{55.44,129.77},
{20.44,65.95},{98.96,181.37},{47.04,111.25},{58.45,117.28},
{57.79,123.58},{83.84,171.31},{61.97,131.09},{37.66,109.35},
{65.32,124.56},{32.31,92.17},{64.08,140.31},{53.53,119.28},
{46.02,102.80},{ 8.19,46.99},{75.24,134.02},{11.54,67.14},
{37.35,94.89},{17.90,73.66},{68.74,127.84},{35.61,74.80},
{12.89,61.03},{51.57,127.06},{69.44,137.16},{55.00,87.07},
{ 9.46,66.57},{ 7.32,50.65},{18.69,60.81},{91.80,157.00},
{23.13,80.31},{79.77,160.31},{25.92,67.57},{88.84,157.95},
{59.62,122.91},{20.02,74.74},{66.18,130.20},{22.33,67.57},
{34.98,72.13},{27.88,80.57},{74.11,152.93},{63.45,123.91},
{34.17,76.27},{59.99,137.07},{26.78,70.38},{30.99,79.08},
{ 3.26,42.81},{51.85,97.62},{38.00,89.05},{69.57,128.69},
{55.77,102.44},{38.64,95.04},{31.94,80.16},{33.57,84.21},
{49.34,115.16},{60.37,126.82},{48.79,79.20},{53.62,101.98},
{44.49,120.58},{40.72,92.32},{62.30,134.92},{86.71,167.45},
{28.92,90.49},{ 3.64,50.38},{ 2.98,51.03},{ 7.72,50.72},
{36.01,88.98},{12.80,53.33},{18.18,59.93},{ 8.74,39.72},
{93.05,157.51},{78.51,157.36},{ 9.32,51.91},{28.43,83.25},
{ 5.63,57.81},{ 5.75,49.78},{33.39,89.81},{98.06,183.34},
{61.33,124.19},{90.28,167.83},{97.93,180.90},{24.74,74.19},
{28.42,90.02},{67.13,126.96},{29.53,65.54},{28.34,82.58},
{ 5.75,35.03},{36.57,89.60},{18.70,68.23},{85.61,147.33},
{90.19,148.59},{29.44,82.07},{95.34,164.13},{25.79,76.89},
{18.18,55.48},{47.36,99.63},{ 9.34,53.88},{95.16,174.51},
{79.94,148.24},{ 1.82,30.86},{75.83,138.77},{15.10,60.29},
{34.45,91.55},{ 0.41,39.02},{12.90,64.51},{75.66,161.10},
{40.13,85.95},{57.37,109.43},{79.64,155.61},{25.54,76.82},
{33.13,80.14},{31.55,75.12},{ 0.93,51.83},{36.40,97.82},
{55.60,112.28},{ 1.26,47.44},{73.54,125.72},{49.81,102.51},
{97.31,170.44},{89.64,169.02},{91.89,169.61},{45.89,107.66},
{99.90,168.98},{39.27,106.49},{63.61,118.52},{60.86,115.28},
{ 3.11,48.70},{61.64,119.97},{28.18,81.53},{ 5.20,43.98},
{51.76,117.25},{57.63,127.76},{78.84,153.80},{10.54,48.23},
{52.51,115.61},{ 4.69,28.30},{93.93,164.59},{69.70,141.08},
{57.13,118.40},{23.45,86.44},{44.59,132.03},{31.91,98.03},
{44.44,103.85},{ 9.46,53.83},{92.57,190.48},{36.29,95.15},
{32.06,92.48},{86.16,138.27},{49.86,112.94},{96.14,181.82},
{16.05,59.03},{52.13,102.89},{51.27,109.44},{67.94,117.86},
{86.77,158.44},{74.20,143.83},{39.13,93.09},{66.00,137.66},
{22.48,55.93},{54.90,116.64},{51.71,95.82},{36.07,85.96},
{ 5.38,59.94},{84.49,160.75},{28.93,82.85},{89.92,183.08},
{ 1.83,61.19},{26.71,73.35},{49.96,93.82},{13.56,68.73},
{26.93,70.29},{85.19,165.22},{74.31,148.26},{44.90,107.17},
{81.60,159.66},{86.68,150.41},{ 8.77,66.43},{75.18,159.06},
{ 8.86,45.78},{66.61,131.48},{ 5.80,40.46},{84.73,169.37},
{35.34,85.87},{97.62,161.97},{15.22,73.73},{77.52,152.32},
{ 2.96,45.12},{60.66,127.28},{66.50,131.20},{72.85,141.01},
{65.90,130.75},{43.44,101.16},{11.06,52.18},{14.77,62.27},
{77.52,159.43},{47.99,126.74},{63.43,110.36},{50.26,113.37},
{95.43,198.57},{24.53,57.71},{12.87,54.28},{63.79,135.32},
{56.58,110.85},{59.10,121.23},{99.99,194.48},{34.56,83.22},
{98.83,173.25},{ 7.73,70.43},{34.44,103.18},{96.37,169.25},
{50.19,118.06},{84.66,175.92},{79.01,142.91},{99.12,174.11},
{61.97,120.41},{81.93,146.96},{36.18,71.82},{ 3.47,51.04},
{ 0.07,52.79},{82.02,158.42},{37.02,94.32},{26.83,77.25},
{87.90,166.44},{22.65,76.05},{ 8.80,53.89},{53.12,109.05},
{64.69,147.19},{55.56,107.54},{98.42,175.01},{ 4.99,30.66},
{63.23,122.97},{56.44,121.49},{ 8.58,69.79},{79.38,149.69},
{55.64,122.77},{84.76,142.77},{29.21,85.68},{18.11,70.14},
{33.15,85.38},{11.30,44.68},{83.67,163.51},{23.43,101.91},
{29.59,81.85},{19.90,75.18},{26.05,89.95},{61.05,129.50},
{ 6.27,68.93},{96.95,191.69},{82.78,145.43},{73.84,136.65},
{60.44,140.08},{12.67,65.86},{ 2.20,36.68},{ 6.16,54.50},
{35.28,95.48},{83.01,152.33},{64.33,121.41},{91.72,162.34},
{72.62,130.77},{55.31,121.78},{83.52,164.02},{ 1.45,24.37},
{84.96,164.70},{ 6.85,59.62},{89.95,158.91},{57.41,133.29},
{ 6.13,58.72},{78.33,125.38},{65.15,121.67},{19.10,81.33},
{17.43,58.99},{60.92,142.74},{69.37,142.70},{ 7.71,53.52},
{38.81,101.59},{18.31,65.35},{41.05,90.89},{29.84,96.28},
{27.65,88.04},{11.74,30.21},{72.14,150.80},{59.92,123.34},
{20.59,51.65},{73.54,147.09},{25.04,52.81},{21.44,78.92},
{ 8.30,68.18},{38.27,103.50},{76.73,135.46},{13.41,42.84},
{ 9.77,50.17},{31.79,84.64},{11.63,47.87},{81.10,154.34},
{32.88,86.39},{83.66,156.16},{42.84,101.97},{92.23,181.69},
{56.62,128.67},{21.57,72.57},{28.42,76.81},{78.49,151.43},
{34.76,87.12},{95.65,153.86},{48.99,114.03},{22.97,82.24},
{96.82,167.98},{55.42,93.17},{59.22,121.38},{41.66,101.95},
{77.91,166.28},{12.14,54.69},{91.24,171.96},{46.22,106.58},
{98.54,161.56},{46.98,104.41},{60.09,131.79},{67.83,119.99},
{41.09,85.10},{70.10,135.73},{36.99,91.78},{10.72,53.99},
{50.06,91.80},{18.39,60.80},{26.53,85.06},{76.65,154.26},
{ 0.02,34.66},{37.56,104.13},{91.48,177.82},{ 0.31,41.81},
{93.20,166.21},{ 6.94,43.44},{85.38,159.15},{90.74,139.02},
{83.80,157.57},{39.34,92.96},{95.49,170.28},{77.55,145.57},
{33.05,88.61},{ 4.28,43.15},{95.89,183.02},{36.18,97.42},
{94.88,160.54},{ 5.18,55.47},{24.04,66.69},{50.93,118.02},
{11.82,58.02},{ 4.55,62.84},{71.87,136.43},{ 1.64,46.82},
{86.64,157.08},{76.78,135.11},{17.94,59.70},{41.81,96.43},
{89.21,170.11},{75.89,139.66},{90.73,160.27},{45.39,101.75},
{61.07,135.71},{ 7.53,48.91},{26.32,74.50},{89.44,164.11},
{79.85,162.30},{55.91,104.02},{89.47,172.39},{88.09,154.40},
{87.63,148.99},{24.43,74.77},{49.28,103.08},{86.49,165.82},
{93.79,148.29},{93.25,170.91},{59.55,126.98},{ 9.24,64.46},
{73.82,134.35},{76.07,152.94},{77.44,148.98},{87.01,161.01},
{72.52,154.33},{21.77,97.29},{47.74,111.47},{17.34,68.45},
{39.75,119.04},{84.78,160.15},{62.57,121.76},{70.20,146.15},
{ 7.08,50.89},{60.33,97.44},{29.90,89.84},{41.82,95.48},
{38.27,101.32},{ 9.60,53.87},{84.69,166.21},{97.64,177.02},
{73.96,145.64},{11.68,73.29},{31.64,75.20},{44.12,119.54},
{29.91,99.48},{62.85,117.28},{65.55,123.70},{78.66,161.94},
{71.06,158.16},{71.17,147.50},{12.49,63.11},{62.47,146.21},
{ 5.52,64.77},{19.39,81.84},{90.73,177.46},{46.94,101.25},
{35.30,92.84},{25.31,80.86},{29.29,95.30},{79.68,160.63},
{65.64,143.96},{81.97,173.79},{58.68,123.92},{37.35,94.26},
{81.31,146.56},{10.82,34.36},{72.19,152.74},{96.81,157.39},
{37.82,84.01},{26.02,85.45},{49.68,99.80},{63.61,134.18},
{78.45,135.62},{28.06,99.20},{29.49,76.17},{38.73,90.19},
{66.67,128.82},{ 4.14,43.63},{75.01,155.68},{12.38,39.83},
{64.06,126.87},{77.44,154.01},{30.67,89.38},{30.38,85.81},
{98.17,180.45},{72.62,131.67},{18.80,74.37},{56.99,112.44},
{80.45,152.36},{43.87,96.62},{10.95,79.63},{ 8.13,48.84},
{96.47,180.58},{57.99,139.71},{29.81,82.98},{ 7.69,59.04},
{60.75,113.11},{61.26,127.28},{29.91,74.20},{72.81,130.29},
{97.22,186.22},{16.10,67.14},{45.22,82.91},{59.90,136.15},
{50.86,99.99},{40.09,89.91},{38.69,87.12},{38.22,86.34},
{82.85,160.54},{44.59,114.27},{ 6.39,49.22},{53.02,118.97},
{67.10,132.43},{87.17,167.48},{61.46,109.49},{79.66,163.17},
{40.28,88.74},{81.76,164.45},{10.26,58.64},{14.58,81.13},
{85.30,184.26},{64.06,132.71},{ 5.55,56.52},{96.97,187.38},
{92.22,174.95},{42.45,100.51},{30.79,81.24},{ 4.25,61.71},
{47.15,104.16},{35.87,86.39},{81.62,152.64},{42.46,95.25},
{66.69,137.47},{33.21,84.65},{23.42,84.12},{99.30,187.76},
{19.15,77.26},{17.74,70.35},{87.90,170.12},{47.01,118.00},
{78.63,155.19},{92.38,163.60},{72.75,153.70},{79.92,138.69},
{21.94,78.76},{55.51,120.91},{27.08,57.31},{12.83,45.59},
{48.22,103.52},{35.64,87.26},{59.90,119.91},{50.05,110.55},
{ 0.23,41.68},{66.03,129.51},{42.67,95.15},{37.78,103.08},
{ 3.06,43.68},{53.80,102.89},{ 9.78,51.90},{94.94,185.83},
{31.69,105.92},{70.50,123.84},{ 5.52,51.03},{ 0.93,47.63},
{68.12,146.17},{ 6.86,51.21},{ 4.60,42.38},{72.98,138.03},
{58.59,125.26},{40.21,88.92},{12.51,41.25},{31.12,65.03},
{75.68,143.15},{74.02,141.52},{ 5.61,50.98},{82.39,162.02},
{28.07,65.38},{71.22,145.28},{44.22,99.25},{72.03,123.62},
{45.88,95.49},{76.37,136.85},{29.19,81.03},{63.46,142.45},
{49.44,100.25},{81.71,132.07},{83.34,150.05},{38.88,93.91},
{86.01,172.44},{51.32,110.43},{86.82,154.64},{70.02,140.53},
{26.43,72.25},{34.48,91.08},{30.41,80.09},{24.77,80.43},
{ 8.14,53.39},{18.88,70.38},{26.90,73.96},{94.43,173.51},
{24.45,62.18},{56.07,111.25},{66.96,136.85},{93.78,188.93},
{75.18,144.91},{18.22,43.66},{97.70,170.91},{34.25,95.34},
{12.16,53.60},{88.48,164.67},{81.58,176.86},{81.96,151.45},
{50.13,109.26},{44.20,90.81},{52.84,121.91},{17.30,76.64},
{53.60,120.26},{32.01,84.79},{72.56,149.71},{19.15,55.05},
{78.26,164.32},{ 9.84,56.96},{ 2.48,50.11},{50.84,134.33},
{90.65,164.89},{35.58,81.09},{72.54,151.27},{54.39,119.73},
{44.15,105.72},{74.88,145.20},{86.66,158.64},{17.79,67.78},
{54.84,115.87},{99.10,173.71},{93.02,174.29},{23.52,83.88},
{19.56,68.81},{24.03,79.83},{11.73,35.38},{ 3.82,37.84},
{61.92,130.48},{77.02,139.38},{91.29,161.65},{98.09,162.25},
{ 0.49,36.07},{75.44,138.86},{ 4.32,59.64},{79.99,143.62},
{13.43,47.42},{44.44,110.46},{25.03,71.30},{71.95,147.86},
{78.51,152.59},{ 3.43,34.40},{55.28,115.97},{88.77,165.45},
{15.43,82.65},{99.09,179.00},{79.77,143.93},{52.73,116.02},
{52.40,109.06},{37.24,83.30},{31.90,80.93},{68.13,127.42},
{70.63,127.66},{55.84,132.35},{39.95,99.29},{ 6.84,40.38},
{66.47,117.89},{20.53,83.00},{82.22,147.22},{23.74,73.03},
{77.83,159.15},{11.29,64.73},{49.15,104.01},{52.54,105.95},
{93.36,160.71},{51.35,106.68},{28.56,83.86},{78.27,147.91},
{ 0.25,50.56},{59.93,120.88},{ 8.58,49.04},{74.24,134.96},
{51.22,98.10},{24.91,74.31},{87.07,160.74},{52.25,105.22},
{91.43,152.78},{ 8.10,59.46},{94.97,178.64},{88.81,178.24},
{88.45,150.89},{21.60,76.09},{70.62,122.85},{99.65,168.36},
{73.32,142.58},{13.18,71.21},{37.26,88.09},{79.15,142.60},
{20.05,91.09},{33.64,87.49},{21.84,64.04},{49.12,116.82},
{52.57,125.37},{42.43,93.45},{22.54,94.46},{82.51,165.14},
{77.64,132.17},{32.25,83.61},{10.77,55.92},{71.34,133.82},
{60.85,127.38},{22.68,79.72},{30.62,77.71},{81.90,161.50},
{10.22,47.19},{26.58,57.16},{43.66,113.00},{90.69,145.82},
{12.64,58.91},{85.90,154.22},{18.03,53.36},{84.49,144.57},
{87.51,169.54},{92.50,170.96},{51.99,123.08},{45.16,108.57},
{71.40,137.44},{58.36,121.84},{76.06,143.46},{42.17,104.52},
{ 2.57,50.10},{11.44,41.77},{71.09,143.92},{88.92,151.09},
{92.79,177.42},{90.72,157.64},{66.11,141.52},{ 2.33,38.80},
{76.26,158.72},{76.52,150.07},{70.31,132.13},{52.77,119.85},
{99.59,176.94},{ 8.16,56.11},{99.29,190.79},{25.00,77.22},
{13.45,63.42},{17.35,70.05},{ 4.16,35.31},{86.57,152.50},
{88.57,168.14},{67.96,123.67},{72.36,142.41},{10.95,73.06},
{78.45,163.31},{71.69,139.46},{82.78,157.91},{80.14,161.51},
{60.33,133.76},{ 9.44,46.00},{68.21,163.48},{30.78,88.27},
{38.74,105.24},{19.52,62.94},{49.03,105.82},{76.01,138.95},
{71.08,165.10},{49.97,108.11},{75.15,145.38},{ 5.20,62.33},
{97.13,188.35},{87.18,176.82},{42.70,96.24},{62.98,126.92},
{96.09,175.08},{90.77,190.99},{71.68,124.23},{15.67,61.12},
{95.37,178.30},{40.64,83.70},{22.64,71.16},{30.22,105.64},
{18.96,77.17},{56.47,98.36},{36.53,84.99},{13.11,73.50},
{32.04,80.23},{72.49,135.67},{54.33,126.59},{13.54,51.56},
{14.77,57.56},{24.09,90.04},{32.43,86.80},{ 3.82,43.03},
{81.10,163.58},{45.39,96.22},{57.29,115.98},{76.10,151.06},
{ 7.74,56.38},{48.95,108.35},{40.07,101.13},{81.91,144.88},
{64.47,124.53},{70.83,129.18},{ 7.05,44.06},{36.46,86.68},
{32.53,89.07},{32.88,77.92},{ 6.62,29.48},{28.87,76.01},
{37.36,90.05},{72.25,136.07},{81.47,173.47},{ 4.20,47.41},
{98.64,166.84},{46.61,109.47},{45.38,88.61},{95.41,169.40},
{66.63,122.42},{98.96,176.41},{77.60,166.70},{39.53,93.96},
{73.29,138.23},{87.99,159.87},{34.35,91.01},{33.30,78.44},
{29.29,78.05},{89.99,153.84},{ 3.90,31.79},{ 2.74,28.45},
{74.07,144.41},{59.60,135.80},{83.19,154.17},{33.14,71.48},
{71.18,127.25},{59.10,126.89},{14.88,60.57},{46.36,122.43},
{97.68,166.02},{47.91,110.00},{94.43,185.03},{25.13,73.98},
{30.66,83.04},{47.36,100.33},{20.03,57.00},{38.53,77.34},
{53.29,122.85},{77.72,146.52},{23.42,84.64},{96.85,170.99},
{ 8.49,68.27},{71.67,127.11},{84.22,158.75},{35.25,87.63},
{74.00,140.25},{32.42,91.48},{88.91,156.95},{88.11,163.18},
{60.49,132.55},{63.94,149.75},{95.21,172.49},{ 2.14,37.70},
{ 2.33,36.29},{24.38,83.62},{87.11,162.19},{37.16,85.65},
{81.57,152.86},{49.26,116.82},{54.72,108.56},{65.82,132.02},
{10.93,47.63},{71.92,111.28},{12.67,36.23},{67.35,101.56},
{86.25,169.18},{97.89,194.43},{40.63,106.58},{73.87,135.71}
};
double error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<ndata; i++) {
error_sum += error(data[i].x, data[i].y, m, c);
}
mean = error_sum / ndata;
return sqrt(mean);
}
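// d_rms_error below is the GPU counterpart of rms_error: each of the 1000 threads writes
// one point's squared residual into error_sum_arr, and main() finishes the mean and the
// square root on the host after copying the array back.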
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, points_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
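// One possible refinement, not part of the original program: accumulate the squared
// residuals on the device so that only a single double per (m,c) candidate has to be
// copied back. This sketch assumes hardware with native double-precision atomicAdd
// (CUDA compute capability 6.0+ or an equivalent AMD GPU).
__global__ void d_rms_error_sum(double *m, double *c, double *error_sum, points_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
atomicAdd(error_sum, d_residual_error(d_data[i].x, d_data[i].y, *m, *c));
}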
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
points_t *d_data;
be = rms_error(bm, bc);
error = hipMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error,
hipGetErrorString(error));
}
error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error,
hipGetErrorString(error));
}
error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error,
hipGetErrorString(error));
}
for(i=0;i<8;i++) {
double h_error_sum_arr[1000];
double error_sum_total = 0;
double error_sum_mean;
hipLaunchKernelGGL(( d_rms_error) , dim3(100),dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
hipDeviceSynchronize();
error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost);
if(error){
fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error,
hipGetErrorString(error));
}
for(int j=0; j<ndata; j++) {
error_sum_total += h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / ndata;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0;
}
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
error = hipFree(d_dm);
if(error){
fprintf(stderr, "hipFree on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_dc);
if(error){
fprintf(stderr, "hipFree on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_data);
if(error){
fprintf(stderr, "hipFree on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
| a68a9d271a7253f44dde963dab5481ea8c78b9a4.cu | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* To compile:
* nvcc -o lr_cuda lr_cuda.cu -lm
*
* To run:
* ./lr_cuda
*****************************************************************************/
typedef struct points_t {
double x;
double y;
} points_t;
int ndata = 1000;
__device__ int d_n_data = 1000;
points_t data[] = {
{87.72,160.36},{79.77,154.56},{74.88,144.94},{67.05,157.02},
{73.75,121.36},{69.58,132.51},{67.21,143.57},{72.13,141.31},
{87.64,144.77},{65.42,146.46},{83.09,144.82},{73.30,137.10},
{76.75,151.67},{69.17,144.50},{87.61,171.62},{83.56,164.97},
{75.57,137.55},{73.80,156.25},{75.75,130.90},{97.13,193.14},
{ 6.54,74.00},{54.31,100.05},{57.36,127.37},{93.54,172.18},
{98.26,190.44},{87.27,149.69},{66.44,143.27},{98.24,171.56},
{27.74,98.27},{10.51,57.95},{88.17,179.14},{22.66,63.64},
{30.46,64.81},{65.24,135.15},{ 1.71,39.30},{91.29,171.76},
{77.43,153.66},{68.29,132.07},{33.43,82.08},{60.40,119.17},
{15.86,64.98},{61.96,126.18},{11.05,58.97},{23.99,67.74},
{21.13,65.78},{34.79,97.87},{22.32,74.86},{78.29,136.48},
{25.38,84.45},{94.49,169.91},{35.62,99.82},{58.20,127.53},
{98.37,182.22},{93.79,168.74},{26.98,91.60},{30.55,103.77},
{ 5.86,44.79},{78.96,159.17},{93.19,158.82},{24.73,51.92},
{94.91,186.12},{61.84,135.07},{ 2.60,37.48},{95.93,189.32},
{95.39,157.13},{20.24,69.22},{29.93,71.36},{68.29,148.83},
{36.68,92.16},{93.29,180.89},{30.53,75.37},{48.10,120.04},
{83.17,153.60},{ 8.22,50.63},{ 8.76,50.80},{97.71,171.61},
{13.28,46.71},{ 1.07,43.74},{11.16,71.21},{20.98,78.43},
{21.89,50.58},{11.51,55.46},{ 3.22,43.94},{41.33,86.40},
{ 9.72,66.88},{10.91,51.56},{40.75,100.96},{81.92,161.87},
{54.44,112.70},{11.35,62.04},{69.49,123.86},{50.51,96.47},
{74.84,150.85},{77.35,150.52},{81.72,163.59},{11.65,37.53},
{13.48,52.63},{79.02,166.24},{84.93,171.49},{59.13,117.54},
{70.14,129.94},{58.90,118.01},{28.64,107.03},{43.71,112.86},
{58.86,114.54},{78.67,160.78},{94.74,171.99},{70.31,122.40},
{16.20,74.80},{58.59,110.98},{70.81,130.09},{ 7.86,46.69},
{91.46,153.64},{34.05,87.81},{24.06,68.39},{66.23,134.24},
{58.69,121.12},{ 0.18,38.19},{20.48,63.34},{63.79,125.30},
{27.38,80.17},{39.12,100.73},{85.45,151.11},{48.89,92.37},
{81.54,155.19},{20.52,55.22},{16.33,56.39},{82.69,158.63},
{78.03,160.98},{ 3.36,62.70},{ 9.16,36.34},{ 5.56,50.25},
{96.94,185.14},{46.54,107.56},{13.32,54.76},{21.63,58.16},
{35.36,86.17},{92.63,175.36},{35.77,93.56},{24.99,73.63},
{78.27,153.33},{44.20,89.01},{ 8.41,28.53},{22.39,65.89},
{ 4.01,63.33},{61.57,131.79},{39.85,95.92},{76.59,132.38},
{49.60,117.48},{54.00,114.01},{92.20,160.82},{98.73,180.85},
{56.18,108.40},{75.10,144.54},{36.91,105.85},{36.52,75.99},
{47.65,79.93},{72.52,151.63},{50.78,112.17},{85.76,151.27},
{14.41,51.07},{48.66,109.44},{50.39,104.20},{95.63,174.36},
{69.74,140.10},{25.53,87.75},{67.15,141.26},{55.44,129.77},
{20.44,65.95},{98.96,181.37},{47.04,111.25},{58.45,117.28},
{57.79,123.58},{83.84,171.31},{61.97,131.09},{37.66,109.35},
{65.32,124.56},{32.31,92.17},{64.08,140.31},{53.53,119.28},
{46.02,102.80},{ 8.19,46.99},{75.24,134.02},{11.54,67.14},
{37.35,94.89},{17.90,73.66},{68.74,127.84},{35.61,74.80},
{12.89,61.03},{51.57,127.06},{69.44,137.16},{55.00,87.07},
{ 9.46,66.57},{ 7.32,50.65},{18.69,60.81},{91.80,157.00},
{23.13,80.31},{79.77,160.31},{25.92,67.57},{88.84,157.95},
{59.62,122.91},{20.02,74.74},{66.18,130.20},{22.33,67.57},
{34.98,72.13},{27.88,80.57},{74.11,152.93},{63.45,123.91},
{34.17,76.27},{59.99,137.07},{26.78,70.38},{30.99,79.08},
{ 3.26,42.81},{51.85,97.62},{38.00,89.05},{69.57,128.69},
{55.77,102.44},{38.64,95.04},{31.94,80.16},{33.57,84.21},
{49.34,115.16},{60.37,126.82},{48.79,79.20},{53.62,101.98},
{44.49,120.58},{40.72,92.32},{62.30,134.92},{86.71,167.45},
{28.92,90.49},{ 3.64,50.38},{ 2.98,51.03},{ 7.72,50.72},
{36.01,88.98},{12.80,53.33},{18.18,59.93},{ 8.74,39.72},
{93.05,157.51},{78.51,157.36},{ 9.32,51.91},{28.43,83.25},
{ 5.63,57.81},{ 5.75,49.78},{33.39,89.81},{98.06,183.34},
{61.33,124.19},{90.28,167.83},{97.93,180.90},{24.74,74.19},
{28.42,90.02},{67.13,126.96},{29.53,65.54},{28.34,82.58},
{ 5.75,35.03},{36.57,89.60},{18.70,68.23},{85.61,147.33},
{90.19,148.59},{29.44,82.07},{95.34,164.13},{25.79,76.89},
{18.18,55.48},{47.36,99.63},{ 9.34,53.88},{95.16,174.51},
{79.94,148.24},{ 1.82,30.86},{75.83,138.77},{15.10,60.29},
{34.45,91.55},{ 0.41,39.02},{12.90,64.51},{75.66,161.10},
{40.13,85.95},{57.37,109.43},{79.64,155.61},{25.54,76.82},
{33.13,80.14},{31.55,75.12},{ 0.93,51.83},{36.40,97.82},
{55.60,112.28},{ 1.26,47.44},{73.54,125.72},{49.81,102.51},
{97.31,170.44},{89.64,169.02},{91.89,169.61},{45.89,107.66},
{99.90,168.98},{39.27,106.49},{63.61,118.52},{60.86,115.28},
{ 3.11,48.70},{61.64,119.97},{28.18,81.53},{ 5.20,43.98},
{51.76,117.25},{57.63,127.76},{78.84,153.80},{10.54,48.23},
{52.51,115.61},{ 4.69,28.30},{93.93,164.59},{69.70,141.08},
{57.13,118.40},{23.45,86.44},{44.59,132.03},{31.91,98.03},
{44.44,103.85},{ 9.46,53.83},{92.57,190.48},{36.29,95.15},
{32.06,92.48},{86.16,138.27},{49.86,112.94},{96.14,181.82},
{16.05,59.03},{52.13,102.89},{51.27,109.44},{67.94,117.86},
{86.77,158.44},{74.20,143.83},{39.13,93.09},{66.00,137.66},
{22.48,55.93},{54.90,116.64},{51.71,95.82},{36.07,85.96},
{ 5.38,59.94},{84.49,160.75},{28.93,82.85},{89.92,183.08},
{ 1.83,61.19},{26.71,73.35},{49.96,93.82},{13.56,68.73},
{26.93,70.29},{85.19,165.22},{74.31,148.26},{44.90,107.17},
{81.60,159.66},{86.68,150.41},{ 8.77,66.43},{75.18,159.06},
{ 8.86,45.78},{66.61,131.48},{ 5.80,40.46},{84.73,169.37},
{35.34,85.87},{97.62,161.97},{15.22,73.73},{77.52,152.32},
{ 2.96,45.12},{60.66,127.28},{66.50,131.20},{72.85,141.01},
{65.90,130.75},{43.44,101.16},{11.06,52.18},{14.77,62.27},
{77.52,159.43},{47.99,126.74},{63.43,110.36},{50.26,113.37},
{95.43,198.57},{24.53,57.71},{12.87,54.28},{63.79,135.32},
{56.58,110.85},{59.10,121.23},{99.99,194.48},{34.56,83.22},
{98.83,173.25},{ 7.73,70.43},{34.44,103.18},{96.37,169.25},
{50.19,118.06},{84.66,175.92},{79.01,142.91},{99.12,174.11},
{61.97,120.41},{81.93,146.96},{36.18,71.82},{ 3.47,51.04},
{ 0.07,52.79},{82.02,158.42},{37.02,94.32},{26.83,77.25},
{87.90,166.44},{22.65,76.05},{ 8.80,53.89},{53.12,109.05},
{64.69,147.19},{55.56,107.54},{98.42,175.01},{ 4.99,30.66},
{63.23,122.97},{56.44,121.49},{ 8.58,69.79},{79.38,149.69},
{55.64,122.77},{84.76,142.77},{29.21,85.68},{18.11,70.14},
{33.15,85.38},{11.30,44.68},{83.67,163.51},{23.43,101.91},
{29.59,81.85},{19.90,75.18},{26.05,89.95},{61.05,129.50},
{ 6.27,68.93},{96.95,191.69},{82.78,145.43},{73.84,136.65},
{60.44,140.08},{12.67,65.86},{ 2.20,36.68},{ 6.16,54.50},
{35.28,95.48},{83.01,152.33},{64.33,121.41},{91.72,162.34},
{72.62,130.77},{55.31,121.78},{83.52,164.02},{ 1.45,24.37},
{84.96,164.70},{ 6.85,59.62},{89.95,158.91},{57.41,133.29},
{ 6.13,58.72},{78.33,125.38},{65.15,121.67},{19.10,81.33},
{17.43,58.99},{60.92,142.74},{69.37,142.70},{ 7.71,53.52},
{38.81,101.59},{18.31,65.35},{41.05,90.89},{29.84,96.28},
{27.65,88.04},{11.74,30.21},{72.14,150.80},{59.92,123.34},
{20.59,51.65},{73.54,147.09},{25.04,52.81},{21.44,78.92},
{ 8.30,68.18},{38.27,103.50},{76.73,135.46},{13.41,42.84},
{ 9.77,50.17},{31.79,84.64},{11.63,47.87},{81.10,154.34},
{32.88,86.39},{83.66,156.16},{42.84,101.97},{92.23,181.69},
{56.62,128.67},{21.57,72.57},{28.42,76.81},{78.49,151.43},
{34.76,87.12},{95.65,153.86},{48.99,114.03},{22.97,82.24},
{96.82,167.98},{55.42,93.17},{59.22,121.38},{41.66,101.95},
{77.91,166.28},{12.14,54.69},{91.24,171.96},{46.22,106.58},
{98.54,161.56},{46.98,104.41},{60.09,131.79},{67.83,119.99},
{41.09,85.10},{70.10,135.73},{36.99,91.78},{10.72,53.99},
{50.06,91.80},{18.39,60.80},{26.53,85.06},{76.65,154.26},
{ 0.02,34.66},{37.56,104.13},{91.48,177.82},{ 0.31,41.81},
{93.20,166.21},{ 6.94,43.44},{85.38,159.15},{90.74,139.02},
{83.80,157.57},{39.34,92.96},{95.49,170.28},{77.55,145.57},
{33.05,88.61},{ 4.28,43.15},{95.89,183.02},{36.18,97.42},
{94.88,160.54},{ 5.18,55.47},{24.04,66.69},{50.93,118.02},
{11.82,58.02},{ 4.55,62.84},{71.87,136.43},{ 1.64,46.82},
{86.64,157.08},{76.78,135.11},{17.94,59.70},{41.81,96.43},
{89.21,170.11},{75.89,139.66},{90.73,160.27},{45.39,101.75},
{61.07,135.71},{ 7.53,48.91},{26.32,74.50},{89.44,164.11},
{79.85,162.30},{55.91,104.02},{89.47,172.39},{88.09,154.40},
{87.63,148.99},{24.43,74.77},{49.28,103.08},{86.49,165.82},
{93.79,148.29},{93.25,170.91},{59.55,126.98},{ 9.24,64.46},
{73.82,134.35},{76.07,152.94},{77.44,148.98},{87.01,161.01},
{72.52,154.33},{21.77,97.29},{47.74,111.47},{17.34,68.45},
{39.75,119.04},{84.78,160.15},{62.57,121.76},{70.20,146.15},
{ 7.08,50.89},{60.33,97.44},{29.90,89.84},{41.82,95.48},
{38.27,101.32},{ 9.60,53.87},{84.69,166.21},{97.64,177.02},
{73.96,145.64},{11.68,73.29},{31.64,75.20},{44.12,119.54},
{29.91,99.48},{62.85,117.28},{65.55,123.70},{78.66,161.94},
{71.06,158.16},{71.17,147.50},{12.49,63.11},{62.47,146.21},
{ 5.52,64.77},{19.39,81.84},{90.73,177.46},{46.94,101.25},
{35.30,92.84},{25.31,80.86},{29.29,95.30},{79.68,160.63},
{65.64,143.96},{81.97,173.79},{58.68,123.92},{37.35,94.26},
{81.31,146.56},{10.82,34.36},{72.19,152.74},{96.81,157.39},
{37.82,84.01},{26.02,85.45},{49.68,99.80},{63.61,134.18},
{78.45,135.62},{28.06,99.20},{29.49,76.17},{38.73,90.19},
{66.67,128.82},{ 4.14,43.63},{75.01,155.68},{12.38,39.83},
{64.06,126.87},{77.44,154.01},{30.67,89.38},{30.38,85.81},
{98.17,180.45},{72.62,131.67},{18.80,74.37},{56.99,112.44},
{80.45,152.36},{43.87,96.62},{10.95,79.63},{ 8.13,48.84},
{96.47,180.58},{57.99,139.71},{29.81,82.98},{ 7.69,59.04},
{60.75,113.11},{61.26,127.28},{29.91,74.20},{72.81,130.29},
{97.22,186.22},{16.10,67.14},{45.22,82.91},{59.90,136.15},
{50.86,99.99},{40.09,89.91},{38.69,87.12},{38.22,86.34},
{82.85,160.54},{44.59,114.27},{ 6.39,49.22},{53.02,118.97},
{67.10,132.43},{87.17,167.48},{61.46,109.49},{79.66,163.17},
{40.28,88.74},{81.76,164.45},{10.26,58.64},{14.58,81.13},
{85.30,184.26},{64.06,132.71},{ 5.55,56.52},{96.97,187.38},
{92.22,174.95},{42.45,100.51},{30.79,81.24},{ 4.25,61.71},
{47.15,104.16},{35.87,86.39},{81.62,152.64},{42.46,95.25},
{66.69,137.47},{33.21,84.65},{23.42,84.12},{99.30,187.76},
{19.15,77.26},{17.74,70.35},{87.90,170.12},{47.01,118.00},
{78.63,155.19},{92.38,163.60},{72.75,153.70},{79.92,138.69},
{21.94,78.76},{55.51,120.91},{27.08,57.31},{12.83,45.59},
{48.22,103.52},{35.64,87.26},{59.90,119.91},{50.05,110.55},
{ 0.23,41.68},{66.03,129.51},{42.67,95.15},{37.78,103.08},
{ 3.06,43.68},{53.80,102.89},{ 9.78,51.90},{94.94,185.83},
{31.69,105.92},{70.50,123.84},{ 5.52,51.03},{ 0.93,47.63},
{68.12,146.17},{ 6.86,51.21},{ 4.60,42.38},{72.98,138.03},
{58.59,125.26},{40.21,88.92},{12.51,41.25},{31.12,65.03},
{75.68,143.15},{74.02,141.52},{ 5.61,50.98},{82.39,162.02},
{28.07,65.38},{71.22,145.28},{44.22,99.25},{72.03,123.62},
{45.88,95.49},{76.37,136.85},{29.19,81.03},{63.46,142.45},
{49.44,100.25},{81.71,132.07},{83.34,150.05},{38.88,93.91},
{86.01,172.44},{51.32,110.43},{86.82,154.64},{70.02,140.53},
{26.43,72.25},{34.48,91.08},{30.41,80.09},{24.77,80.43},
{ 8.14,53.39},{18.88,70.38},{26.90,73.96},{94.43,173.51},
{24.45,62.18},{56.07,111.25},{66.96,136.85},{93.78,188.93},
{75.18,144.91},{18.22,43.66},{97.70,170.91},{34.25,95.34},
{12.16,53.60},{88.48,164.67},{81.58,176.86},{81.96,151.45},
{50.13,109.26},{44.20,90.81},{52.84,121.91},{17.30,76.64},
{53.60,120.26},{32.01,84.79},{72.56,149.71},{19.15,55.05},
{78.26,164.32},{ 9.84,56.96},{ 2.48,50.11},{50.84,134.33},
{90.65,164.89},{35.58,81.09},{72.54,151.27},{54.39,119.73},
{44.15,105.72},{74.88,145.20},{86.66,158.64},{17.79,67.78},
{54.84,115.87},{99.10,173.71},{93.02,174.29},{23.52,83.88},
{19.56,68.81},{24.03,79.83},{11.73,35.38},{ 3.82,37.84},
{61.92,130.48},{77.02,139.38},{91.29,161.65},{98.09,162.25},
{ 0.49,36.07},{75.44,138.86},{ 4.32,59.64},{79.99,143.62},
{13.43,47.42},{44.44,110.46},{25.03,71.30},{71.95,147.86},
{78.51,152.59},{ 3.43,34.40},{55.28,115.97},{88.77,165.45},
{15.43,82.65},{99.09,179.00},{79.77,143.93},{52.73,116.02},
{52.40,109.06},{37.24,83.30},{31.90,80.93},{68.13,127.42},
{70.63,127.66},{55.84,132.35},{39.95,99.29},{ 6.84,40.38},
{66.47,117.89},{20.53,83.00},{82.22,147.22},{23.74,73.03},
{77.83,159.15},{11.29,64.73},{49.15,104.01},{52.54,105.95},
{93.36,160.71},{51.35,106.68},{28.56,83.86},{78.27,147.91},
{ 0.25,50.56},{59.93,120.88},{ 8.58,49.04},{74.24,134.96},
{51.22,98.10},{24.91,74.31},{87.07,160.74},{52.25,105.22},
{91.43,152.78},{ 8.10,59.46},{94.97,178.64},{88.81,178.24},
{88.45,150.89},{21.60,76.09},{70.62,122.85},{99.65,168.36},
{73.32,142.58},{13.18,71.21},{37.26,88.09},{79.15,142.60},
{20.05,91.09},{33.64,87.49},{21.84,64.04},{49.12,116.82},
{52.57,125.37},{42.43,93.45},{22.54,94.46},{82.51,165.14},
{77.64,132.17},{32.25,83.61},{10.77,55.92},{71.34,133.82},
{60.85,127.38},{22.68,79.72},{30.62,77.71},{81.90,161.50},
{10.22,47.19},{26.58,57.16},{43.66,113.00},{90.69,145.82},
{12.64,58.91},{85.90,154.22},{18.03,53.36},{84.49,144.57},
{87.51,169.54},{92.50,170.96},{51.99,123.08},{45.16,108.57},
{71.40,137.44},{58.36,121.84},{76.06,143.46},{42.17,104.52},
{ 2.57,50.10},{11.44,41.77},{71.09,143.92},{88.92,151.09},
{92.79,177.42},{90.72,157.64},{66.11,141.52},{ 2.33,38.80},
{76.26,158.72},{76.52,150.07},{70.31,132.13},{52.77,119.85},
{99.59,176.94},{ 8.16,56.11},{99.29,190.79},{25.00,77.22},
{13.45,63.42},{17.35,70.05},{ 4.16,35.31},{86.57,152.50},
{88.57,168.14},{67.96,123.67},{72.36,142.41},{10.95,73.06},
{78.45,163.31},{71.69,139.46},{82.78,157.91},{80.14,161.51},
{60.33,133.76},{ 9.44,46.00},{68.21,163.48},{30.78,88.27},
{38.74,105.24},{19.52,62.94},{49.03,105.82},{76.01,138.95},
{71.08,165.10},{49.97,108.11},{75.15,145.38},{ 5.20,62.33},
{97.13,188.35},{87.18,176.82},{42.70,96.24},{62.98,126.92},
{96.09,175.08},{90.77,190.99},{71.68,124.23},{15.67,61.12},
{95.37,178.30},{40.64,83.70},{22.64,71.16},{30.22,105.64},
{18.96,77.17},{56.47,98.36},{36.53,84.99},{13.11,73.50},
{32.04,80.23},{72.49,135.67},{54.33,126.59},{13.54,51.56},
{14.77,57.56},{24.09,90.04},{32.43,86.80},{ 3.82,43.03},
{81.10,163.58},{45.39,96.22},{57.29,115.98},{76.10,151.06},
{ 7.74,56.38},{48.95,108.35},{40.07,101.13},{81.91,144.88},
{64.47,124.53},{70.83,129.18},{ 7.05,44.06},{36.46,86.68},
{32.53,89.07},{32.88,77.92},{ 6.62,29.48},{28.87,76.01},
{37.36,90.05},{72.25,136.07},{81.47,173.47},{ 4.20,47.41},
{98.64,166.84},{46.61,109.47},{45.38,88.61},{95.41,169.40},
{66.63,122.42},{98.96,176.41},{77.60,166.70},{39.53,93.96},
{73.29,138.23},{87.99,159.87},{34.35,91.01},{33.30,78.44},
{29.29,78.05},{89.99,153.84},{ 3.90,31.79},{ 2.74,28.45},
{74.07,144.41},{59.60,135.80},{83.19,154.17},{33.14,71.48},
{71.18,127.25},{59.10,126.89},{14.88,60.57},{46.36,122.43},
{97.68,166.02},{47.91,110.00},{94.43,185.03},{25.13,73.98},
{30.66,83.04},{47.36,100.33},{20.03,57.00},{38.53,77.34},
{53.29,122.85},{77.72,146.52},{23.42,84.64},{96.85,170.99},
{ 8.49,68.27},{71.67,127.11},{84.22,158.75},{35.25,87.63},
{74.00,140.25},{32.42,91.48},{88.91,156.95},{88.11,163.18},
{60.49,132.55},{63.94,149.75},{95.21,172.49},{ 2.14,37.70},
{ 2.33,36.29},{24.38,83.62},{87.11,162.19},{37.16,85.65},
{81.57,152.86},{49.26,116.82},{54.72,108.56},{65.82,132.02},
{10.93,47.63},{71.92,111.28},{12.67,36.23},{67.35,101.56},
{86.25,169.18},{97.89,194.43},{40.63,106.58},{73.87,135.71}
};
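// Squared residual of one observation (x, y) against the candidate line
// y = m*x + c; the host and device versions below are identical.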
double error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<ndata; i++) {
error_sum += error(data[i].x, data[i].y, m, c);
}
mean = error_sum / ndata;
return sqrt(mean);
}
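// Kernel: one thread per data point writes its squared residual into
// error_sum_arr; the host then averages the array and takes the square root.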
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, points_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
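// Hill-climbing search for the least-squares line y = m*x + c: starting from
// (bm, bc), evaluate the eight neighbouring candidates offset by `step` in the
// directions given by om/oc and move to the best one until no neighbour
// improves the RMS error.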
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
points_t *d_data;
be = rms_error(bm, bc);
error = cudaMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
cudaGetErrorString(error));
}
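// Evaluate the RMS error of each of the eight candidate lines on the GPU,
// one thread per data point (100 blocks x 10 threads), reducing on the host.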
for(i=0;i<8;i++) {
double h_error_sum_arr[1000];
double error_sum_total = 0;
double error_sum_mean;
d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
cudaDeviceSynchronize();
error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
cudaGetErrorString(error));
}
for(int j=0; j<ndata; j++) {
error_sum_total += h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / ndata;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0;
}
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
error = cudaFree(d_dm);
if(error){
fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_dc);
if(error){
fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_data);
if(error){
fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
bb676726facec490a764274b91f4816910650a13.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 NVIDIA CORPORATION.
* Copyright (c) 2018-2020 Chris Choy ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map_functors.cuh"
#include "coordinate_map_gpu.cuh"
#include "types.hpp"
#include "utils.hpp"
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <torch/extension.h>
namespace minkowski {
using coordinate_type = int32_t;
using index_type = default_types::index_type;
using size_type = default_types::size_type;
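// Inserts an N x D integer coordinate tensor into a GPU coordinate map and
// returns the resulting map size together with the insertion time.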
std::pair<size_type, double>
coordinate_map_batch_insert_test(const torch::Tensor &coordinates) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::CheckedFrom c = "coordinate_test";
torch::checkContiguous(c, arg_coordinates);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
coordinate_type const *d_ptr = coordinates.data_ptr<coordinate_type>();
LOG_DEBUG("Initialize a GPU map.");
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, d_ptr);
thrust::counting_iterator<uint32_t> iter{0};
LOG_DEBUG("Insert coordinates");
timer t;
t.tic();
map.insert<false>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
return std::make_pair<size_type, double>(map.size(), t.toc());
}
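// Inserts the coordinates via insert_and_map and returns the mapping and
// inverse-mapping index vectors copied into int32 CUDA tensors.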
std::pair<at::Tensor, at::Tensor>
coordinate_map_inverse_map_test(const torch::Tensor &coordinates) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::CheckedFrom c = "coordinate_test";
torch::checkContiguous(c, arg_coordinates);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
coordinate_type const *d_ptr = coordinates.data_ptr<coordinate_type>();
LOG_DEBUG("Initialize a GPU map.");
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, d_ptr);
thrust::counting_iterator<uint32_t> iter{0};
LOG_DEBUG("Insert coordinates");
auto mapping_inverse_mapping =
map.insert_and_map<true>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
auto const &mapping = mapping_inverse_mapping.first;
auto const &inverse_mapping = mapping_inverse_mapping.second;
long const NM = mapping.size();
long const NI = inverse_mapping.size();
auto options = torch::TensorOptions()
.dtype(torch::kInt)
.device(torch::kCUDA, 0)
.layout(torch::kStrided)
.requires_grad(false);
torch::Tensor th_mapping = torch::empty({NM}, options);
torch::Tensor th_inverse_mapping = torch::empty({NI}, options);
// IMPORTANT: assuming int32_t overflow does not occur.
CUDA_CHECK(hipMemcpy(th_mapping.data_ptr<int32_t>(),
mapping.cdata(),
NM * sizeof(int32_t), hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(th_inverse_mapping.data_ptr<int32_t>(),
inverse_mapping.cdata(),
NI * sizeof(int32_t), hipMemcpyDeviceToDevice));
return std::make_pair<at::Tensor, at::Tensor>(std::move(th_mapping),
std::move(th_inverse_mapping));
}
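// Builds a map from `coordinates`, looks up `queries`, and copies the
// resulting (first, second) index vectors back into host vectors.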
std::pair<std::vector<index_type>, std::vector<index_type>>
coordinate_map_batch_find_test(const torch::Tensor &coordinates,
const torch::Tensor &queries) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::TensorArg arg_queries(queries, "queries", 1);
torch::CheckedFrom c = "coordinate_test";
torch::checkContiguous(c, arg_coordinates);
torch::checkContiguous(c, arg_queries);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkScalarType(c, arg_queries, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_queries.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
torch::checkDim(c, arg_queries, 2);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
auto const NQ = (index_type)queries.size(0);
auto const DQ = (index_type)queries.size(1);
ASSERT(D == DQ, "Coordinates and queries must have the same size.");
coordinate_type const *ptr = coordinates.data_ptr<coordinate_type>();
coordinate_type const *query_ptr = queries.data_ptr<coordinate_type>();
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, ptr);
thrust::counting_iterator<uint32_t> iter{0};
map.insert<false>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
LOG_DEBUG("Map size", map.size());
auto query_coordinates = coordinate_range<coordinate_type>(NQ, D, query_ptr);
LOG_DEBUG("Find coordinates.");
auto const query_results =
map.find(query_coordinates.begin(), query_coordinates.end());
auto const &firsts(query_results.first);
auto const &seconds(query_results.second);
index_type NR = firsts.size();
LOG_DEBUG(NR, "keys found.");
std::vector<index_type> cpu_firsts(NR);
std::vector<index_type> cpu_seconds(NR);
THRUST_CHECK(thrust::copy(firsts.cbegin(), firsts.cend(), cpu_firsts.begin()));
THRUST_CHECK(thrust::copy(seconds.cbegin(), seconds.cend(), cpu_seconds.begin()));
return std::make_pair(cpu_firsts, cpu_seconds);
}
/******************************************************************************
* New coordinate map generation tests
******************************************************************************/
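// Inserts the coordinates, derives a strided coordinate map from the given
// tensor stride, and returns the strided map's size and its tensor stride.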
std::pair<size_type, std::vector<size_type>>
coordinate_map_stride_test(const torch::Tensor &coordinates,
const torch::Tensor &stride) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::TensorArg arg_stride(stride, "stride", 1);
torch::CheckedFrom c = "coordinate_map_stride_test";
torch::checkContiguous(c, arg_coordinates);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
// must match coordinate_type
torch::checkScalarType(c, arg_stride, torch::kInt);
torch::checkBackend(c, arg_stride.tensor, torch::Backend::CPU);
torch::checkDim(c, arg_stride, 1);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
auto const NS = (index_type)stride.size(0);
ASSERT(NS == D - 1, "Invalid stride size", NS);
coordinate_type const *ptr = coordinates.data_ptr<coordinate_type>();
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, ptr);
thrust::counting_iterator<uint32_t> iter{0};
map.insert<false>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
// Stride
default_types::stride_type stride_vec(NS);
int32_t const *stride_ptr = stride.data_ptr<int32_t>();
for (uint32_t i = 0; i < NS; ++i) {
stride_vec[i] = stride_ptr[i];
ASSERT(stride_ptr[i] > 0, "Invalid stride. All strides must be positive.");
}
auto const stride_map = map.stride(stride_vec);
return std::make_pair(stride_map.size(), stride_map.get_tensor_stride());
}
} // namespace minkowski
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("coordinate_map_batch_insert_test",
&minkowski::coordinate_map_batch_insert_test,
"Minkowski Engine coordinate map batch insert test");
m.def("coordinate_map_inverse_map_test",
&minkowski::coordinate_map_inverse_map_test,
"Minkowski Engine coordinate map inverse map test");
m.def("coordinate_map_batch_find_test",
&minkowski::coordinate_map_batch_find_test,
"Minkowski Engine coordinate map batch find test");
m.def("coordinate_map_stride_test", &minkowski::coordinate_map_stride_test,
"Minkowski Engine coordinate map stride test");
}
| bb676726facec490a764274b91f4816910650a13.cu | /* Copyright (c) 2020 NVIDIA CORPORATION.
* Copyright (c) 2018-2020 Chris Choy ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map_functors.cuh"
#include "coordinate_map_gpu.cuh"
#include "types.hpp"
#include "utils.hpp"
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <torch/extension.h>
namespace minkowski {
using coordinate_type = int32_t;
using index_type = default_types::index_type;
using size_type = default_types::size_type;
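// Inserts an N x D integer coordinate tensor into a GPU coordinate map and
// returns the resulting map size together with the insertion time.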
std::pair<size_type, double>
coordinate_map_batch_insert_test(const torch::Tensor &coordinates) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::CheckedFrom c = "coordinate_test";
torch::checkContiguous(c, arg_coordinates);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
coordinate_type const *d_ptr = coordinates.data_ptr<coordinate_type>();
LOG_DEBUG("Initialize a GPU map.");
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, d_ptr);
thrust::counting_iterator<uint32_t> iter{0};
LOG_DEBUG("Insert coordinates");
timer t;
t.tic();
map.insert<false>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
return std::make_pair<size_type, double>(map.size(), t.toc());
}
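// Inserts the coordinates via insert_and_map and returns the mapping and
// inverse-mapping index vectors copied into int32 CUDA tensors.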
std::pair<at::Tensor, at::Tensor>
coordinate_map_inverse_map_test(const torch::Tensor &coordinates) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::CheckedFrom c = "coordinate_test";
torch::checkContiguous(c, arg_coordinates);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
coordinate_type const *d_ptr = coordinates.data_ptr<coordinate_type>();
LOG_DEBUG("Initialize a GPU map.");
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, d_ptr);
thrust::counting_iterator<uint32_t> iter{0};
LOG_DEBUG("Insert coordinates");
auto mapping_inverse_mapping =
map.insert_and_map<true>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
auto const &mapping = mapping_inverse_mapping.first;
auto const &inverse_mapping = mapping_inverse_mapping.second;
long const NM = mapping.size();
long const NI = inverse_mapping.size();
auto options = torch::TensorOptions()
.dtype(torch::kInt)
.device(torch::kCUDA, 0)
.layout(torch::kStrided)
.requires_grad(false);
torch::Tensor th_mapping = torch::empty({NM}, options);
torch::Tensor th_inverse_mapping = torch::empty({NI}, options);
// IMPORTANT: assuming int32_t overflow does not occur.
CUDA_CHECK(cudaMemcpy(th_mapping.data_ptr<int32_t>(),
mapping.cdata(),
NM * sizeof(int32_t), cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(th_inverse_mapping.data_ptr<int32_t>(),
inverse_mapping.cdata(),
NI * sizeof(int32_t), cudaMemcpyDeviceToDevice));
return std::make_pair<at::Tensor, at::Tensor>(std::move(th_mapping),
std::move(th_inverse_mapping));
}
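// Builds a map from `coordinates`, looks up `queries`, and copies the
// resulting (first, second) index vectors back into host vectors.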
std::pair<std::vector<index_type>, std::vector<index_type>>
coordinate_map_batch_find_test(const torch::Tensor &coordinates,
const torch::Tensor &queries) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::TensorArg arg_queries(queries, "queries", 1);
torch::CheckedFrom c = "coordinate_test";
torch::checkContiguous(c, arg_coordinates);
torch::checkContiguous(c, arg_queries);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkScalarType(c, arg_queries, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_queries.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
torch::checkDim(c, arg_queries, 2);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
auto const NQ = (index_type)queries.size(0);
auto const DQ = (index_type)queries.size(1);
ASSERT(D == DQ, "Coordinates and queries must have the same size.");
coordinate_type const *ptr = coordinates.data_ptr<coordinate_type>();
coordinate_type const *query_ptr = queries.data_ptr<coordinate_type>();
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, ptr);
thrust::counting_iterator<uint32_t> iter{0};
map.insert<false>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
LOG_DEBUG("Map size", map.size());
auto query_coordinates = coordinate_range<coordinate_type>(NQ, D, query_ptr);
LOG_DEBUG("Find coordinates.");
auto const query_results =
map.find(query_coordinates.begin(), query_coordinates.end());
auto const &firsts(query_results.first);
auto const &seconds(query_results.second);
index_type NR = firsts.size();
LOG_DEBUG(NR, "keys found.");
std::vector<index_type> cpu_firsts(NR);
std::vector<index_type> cpu_seconds(NR);
THRUST_CHECK(thrust::copy(firsts.cbegin(), firsts.cend(), cpu_firsts.begin()));
THRUST_CHECK(thrust::copy(seconds.cbegin(), seconds.cend(), cpu_seconds.begin()));
return std::make_pair(cpu_firsts, cpu_seconds);
}
/******************************************************************************
* New coordinate map generation tests
******************************************************************************/
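// Inserts the coordinates, derives a strided coordinate map from the given
// tensor stride, and returns the strided map's size and its tensor stride.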
std::pair<size_type, std::vector<size_type>>
coordinate_map_stride_test(const torch::Tensor &coordinates,
const torch::Tensor &stride) {
// Create TensorArgs. These record the names and positions of each tensor as a
// parameter.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::TensorArg arg_stride(stride, "stride", 1);
torch::CheckedFrom c = "coordinate_map_stride_test";
torch::checkContiguous(c, arg_coordinates);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkDim(c, arg_coordinates, 2);
// must match coordinate_type
torch::checkScalarType(c, arg_stride, torch::kInt);
torch::checkBackend(c, arg_stride.tensor, torch::Backend::CPU);
torch::checkDim(c, arg_stride, 1);
auto const N = (index_type)coordinates.size(0);
auto const D = (index_type)coordinates.size(1);
auto const NS = (index_type)stride.size(0);
ASSERT(NS == D - 1, "Invalid stride size", NS);
coordinate_type const *ptr = coordinates.data_ptr<coordinate_type>();
CoordinateMapGPU<coordinate_type> map{N, D};
auto input_coordinates = coordinate_range<coordinate_type>(N, D, ptr);
thrust::counting_iterator<uint32_t> iter{0};
map.insert<false>(input_coordinates.begin(), // key begin
input_coordinates.end()); // key end
// Stride
default_types::stride_type stride_vec(NS);
int32_t const *stride_ptr = stride.data_ptr<int32_t>();
for (uint32_t i = 0; i < NS; ++i) {
stride_vec[i] = stride_ptr[i];
ASSERT(stride_ptr[i] > 0, "Invalid stride. All strides must be positive.");
}
auto const stride_map = map.stride(stride_vec);
return std::make_pair(stride_map.size(), stride_map.get_tensor_stride());
}
} // namespace minkowski
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("coordinate_map_batch_insert_test",
&minkowski::coordinate_map_batch_insert_test,
"Minkowski Engine coordinate map batch insert test");
m.def("coordinate_map_inverse_map_test",
&minkowski::coordinate_map_inverse_map_test,
"Minkowski Engine coordinate map inverse map test");
m.def("coordinate_map_batch_find_test",
&minkowski::coordinate_map_batch_find_test,
"Minkowski Engine coordinate map batch find test");
m.def("coordinate_map_stride_test", &minkowski::coordinate_map_stride_test,
"Minkowski Engine coordinate map stride test");
}
|
29670d5e2a301e54d1e0781052d4d5251d473193.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/layers/st_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void set_value_to_constant(const int nthreads, Dtype value, int size,
int i, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
dst[index * size + i] = value;
}
}
template <typename Dtype>
__global__ void copy_values(const int nthreads, int size_src, int k,
const Dtype* src, int size_dst, int i, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
dst[index * size_dst + i] = src[index * size_src + k];
}
}
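// Forward sampling kernel: one thread per output element converts the
// normalised sampling coordinates (px, py) to input pixel space and
// bilinearly interpolates U over the four neighbouring pixels.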
template <typename Dtype>
__global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* U, Dtype* V) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
const int V_offset = index;
V[V_offset] = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
const Dtype* pic = U + i * (C * H * W) + j * (H * W);
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
string prefix = "SpatialTransformerLayer::Forward_gpu::\t";
const Dtype* U = bottom[0]->gpu_data();
const Dtype* theta = bottom[1]->gpu_data();
const Dtype* output_grid_data = output_grid.gpu_data();
Dtype* full_theta_data = full_theta.mutable_gpu_data();
Dtype* full_gamma_data = full_gamma.mutable_cpu_data();
Dtype* input_grid_data = input_grid.mutable_gpu_data();
Dtype* V = top[0]->mutable_gpu_data();
caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data);
caffe_gpu_set(top[0]->count(), (Dtype)0, V);
// compute full_theta
int k = 0;
const int num_threads = N;
for(int i=0; i<6; ++i) {
if(is_pre_defined_theta[i]) {
hipLaunchKernelGGL(( set_value_to_constant<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_threads, pre_defined_theta[i], 6, i, full_theta_data);
//std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i <<
// "/6 of full_theta_data" << std::endl;
} else {
hipLaunchKernelGGL(( copy_values<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads,
6 - pre_defined_count, k, theta, 6, i, full_theta_data);
//std::cout << "Copying " << k << "/" << 6 - pre_defined_count << " of theta to "
// << i << "/6 of full_theta_data" << std::endl;
++ k;
}
}
// For detransform, calculate gamma for de-transform
if(de_transform){
for(int i=0; i<N; i++){
double denom_ = full_gamma_data[6*i+0]*full_gamma_data[6*i+4] - full_gamma_data[6*i+1]*full_gamma_data[6*i+3];
if(denom_ == 0.0){
DLOG(INFO) << "Singular matrix encountered. Do identity mapping.";
full_gamma_data[6*i+0] = 1; full_gamma_data[6*i+1] = 0; full_gamma_data[6*i+2] = 0;
full_gamma_data[6*i+3] = 0; full_gamma_data[6*i+4] = 1; full_gamma_data[6*i+5] = 0;
}
else{
Dtype tmp_a = full_gamma_data[6*i+0];
Dtype tmp_b = full_gamma_data[6*i+1];
full_gamma_data[6*i+0] = full_gamma_data[6*i+4]/denom_; full_gamma_data[6*i+1] = full_gamma_data[6*i+3]/denom_;
full_gamma_data[6*i+3] = tmp_b/denom_; full_gamma_data[6*i+4] = tmp_a/denom_;
Dtype tmp_c = full_gamma_data[6*i+2];
Dtype tmp_d = full_gamma_data[6*i+5];
full_gamma_data[6*i+2] = -(full_gamma_data[6*i+0]*tmp_c + full_gamma_data[6*i+1]*tmp_d);
full_gamma_data[6*i+5] = -(full_gamma_data[6*i+3]*tmp_c + full_gamma_data[6*i+4]*tmp_d);
}
}
// compute out input_grid_data
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1.,
output_grid_data, full_gamma_data + 6 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 2) * i);
}
}
else{
// compute out input_grid_data
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1.,
output_grid_data, full_theta_data + 6 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 2) * i);
}
}
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( SpatialTransformerForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V);
}
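// Backward kernel w.r.t. theta: per output element, back-propagate dV through
// the bilinear interpolation to get gradients w.r.t. the sampling coordinates,
// then chain them onto the six affine parameters in dTheta_tmp_diff.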
template <typename Dtype>
__global__ void SpatialTransformerBackwardGPU_dTheta(const int nthreads, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array,
Dtype* dTheta_tmp_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
Dtype delta_dpx = (Dtype)0.;
Dtype delta_dpy = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
const int dV_offset = index;
const Dtype dV = dV_array[dV_offset];
int m, n;
const Dtype* U = U_array + i * (C * H * W) + j * (H * W);
// left-bottom neighbor
m = floor(x); n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// left-top neighbor
m = floor(x); n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// right-bottom neighbor
m = floor(x) + 1; n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
// right-top neighbor
m = floor(x) + 1; n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
int idx = j * (output_H_ * output_W_) + s * output_W_ + t;
dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx;
dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy;
}
}
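// Backward kernel w.r.t. U: scatter the output gradient dV back onto the
// input feature map with the same bilinear weights; atomic adds are required
// because several output samples can hit the same input pixel.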
template <typename Dtype>
__global__ void SpatialTransformerBackwardGPU_dU(const int nthreads, const int C,
const int W, const int H, const int output_H_, const int output_W_,
const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
const int V_offset = index;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
Dtype* pic = dU + i * (C * H * W) + j * (H * W);
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
string prefix = "SpatialTransformerLayer::Backward_GPU::\t";
const Dtype* dV = top[0]->gpu_diff();
const Dtype* input_grid_data = input_grid.gpu_data();
const Dtype* U = bottom[0]->gpu_data();
Dtype* dFull_theta = full_theta.mutable_gpu_diff();
Dtype* dTheta = bottom[1]->mutable_gpu_diff();
if(!de_transform){
Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff();
caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( SpatialTransformerBackwardGPU_dTheta<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dTheta_tmp_diff);
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta);
/*const Dtype* db_dFull_theta = full_theta.cpu_diff();
for(int i=0; i<full_theta.count(); ++i) {
std::cout << db_dFull_theta[i] << " ";
}
std::cout<<std::endl;*/
}
else{
Dtype* dFull_gamma = full_gamma.mutable_gpu_diff();
Dtype* dGamma = bottom[1]->mutable_gpu_diff();
Dtype* dGamma_tmp_diff = dGamma_tmp.mutable_gpu_diff();
Dtype* dTheta_1_2_data = dTheta_1_2.mutable_gpu_data();
caffe_gpu_set(dGamma_tmp.count(), (Dtype)0., dGamma_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( SpatialTransformerBackwardGPU_dTheta<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dGamma_tmp_diff);
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_gamma.count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dGamma_tmp_diff, all_ones_2_data, (Dtype)0., dFull_gamma);
/*const Dtype* db_dFull_theta = full_theta.cpu_diff();
for(int i=0; i<full_theta.count(); ++i) {
std::cout << db_dFull_theta[i] << " ";
}
std::cout<<std::endl;*/
const Dtype* full_theta_data = full_theta.gpu_data();
const Dtype* full_gamma_data = full_gamma.gpu_data();
Dtype* dg_dt_data = dg_dt.mutable_gpu_data();
Dtype* dGamma_1_2_data = dGamma_1_2.mutable_gpu_data();
for(int i=0; i<N; i++){
double denom_ = full_theta_data[6*i+0]*full_theta_data[6*i+4] - full_theta_data[6*i+1]*full_theta_data[6*i+3];
if(denom_ == 0){
dFull_theta[6*i+0] = 0; dFull_theta[6*i+1] = 0; dFull_theta[6*i+2] = 0;
dFull_theta[6*i+3] = 0; dFull_theta[6*i+4] = 0; dFull_theta[6*i+5] = 0;
}
else{
//d_theta_3
dFull_theta[6*i+2] = -1 * (full_gamma_data[6*i+0]*dGamma[6*i+2] + full_gamma_data[6*i+1]*dGamma[6*i+5]);
dFull_theta[6*i+5] = -1 * (full_gamma_data[6*i+3]*dGamma[6*i+2] + full_gamma_data[6*i+4]*dGamma[6*i+5]);
//d_theta_1_2
dg_dt_data[0*4 + 0] = (-1)*full_theta_data[6*i + 4]*full_theta_data[6*i + 4]; dg_dt_data[0*4 + 1] = full_theta_data[6*i + 1]*full_theta_data[6*i + 4]; dg_dt_data[0*4 + 2] = full_theta_data[6*i + 3]*full_theta_data[6*i + 4]; dg_dt_data[0*4 + 3] = (-1)*full_theta_data[6*i + 3]*full_theta_data[6*i + 1];
dg_dt_data[1*4 + 0] = full_theta_data[6*i + 4]*full_theta_data[6*i + 3]; dg_dt_data[1*4 + 1] = (-1)*full_theta_data[6*i + 0]*full_theta_data[6*i + 4]; dg_dt_data[1*4 + 2] = (-1)*full_theta_data[6*i + 3]*full_theta_data[6*i + 3]; dg_dt_data[1*4 + 3] = full_theta_data[6*i + 0]*full_theta_data[6*i + 3];
dg_dt_data[2*4 + 0] = full_theta_data[6*i + 4]*full_theta_data[6*i + 1]; dg_dt_data[2*4 + 1] = (-1)*full_theta_data[6*i + 1]*full_theta_data[6*i + 1]; dg_dt_data[2*4 + 2] = (-1)*full_theta_data[6*i + 0]*full_theta_data[6*i + 4]; dg_dt_data[2*4 + 3] = full_theta_data[6*i + 0]*full_theta_data[6*i + 1];
dg_dt_data[3*4 + 0] = (-1)*full_theta_data[6*i + 3]*full_theta_data[6*i + 1]; dg_dt_data[3*4 + 1] = full_theta_data[6*i + 0]*full_theta_data[6*i + 1]; dg_dt_data[3*4 + 2] = full_theta_data[6*i + 3]*full_theta_data[6*i + 0]; dg_dt_data[3*4 + 3] = (-1)*full_theta_data[6*i + 0]*full_theta_data[6*i + 0];
dGamma_1_2_data[0] = dGamma[6*i + 0] - dGamma[6*i + 2]*full_theta_data[6*i + 2];
dGamma_1_2_data[1] = dGamma[6*i + 3] - dGamma[6*i + 5]*full_theta_data[6*i + 2];
dGamma_1_2_data[2] = dGamma[6*i + 1] - dGamma[6*i + 2]*full_theta_data[6*i + 5];
dGamma_1_2_data[3] = dGamma[6*i + 4] - dGamma[6*i + 5]*full_theta_data[6*i + 5];
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 4, 4, 1,
(Dtype)1., dg_dt_data, dGamma_1_2_data, (Dtype)0., dTheta_1_2_data);
dFull_theta[6*i+0] = dTheta_1_2_data[0]; dFull_theta[6*i+1] = dTheta_1_2_data[2];
dFull_theta[6*i+3] = dTheta_1_2_data[1]; dFull_theta[6*i+4] = dTheta_1_2_data[3];
}
}
}
int k = 0;
const int num_threads = N;
for(int i=0; i<6; ++i) {
if(!is_pre_defined_theta[i]) {
hipLaunchKernelGGL(( copy_values<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads,
6, i, dFull_theta, 6 - pre_defined_count, k, dTheta);
//std::cout << "Copying " << i << "/6 of dFull_theta to " << k << "/" <<
// 6 - pre_defined_count << " of dTheta" << std::endl;
++ k;
}
}
/*const Dtype* db_dtheta = bottom[1]->cpu_diff();
for(int i=0; i<bottom[1]->count(); ++i) {
std::cout << db_dtheta[i] << " ";
}
std::cout<<std::endl;*/
if(to_compute_dU_ or de_transform) {
Dtype* dU = bottom[0]->mutable_gpu_diff();
caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU);
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( SpatialTransformerBackwardGPU_dU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe | 29670d5e2a301e54d1e0781052d4d5251d473193.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/layers/st_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void set_value_to_constant(const int nthreads, Dtype value, int size,
int i, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
dst[index * size + i] = value;
}
}
template <typename Dtype>
__global__ void copy_values(const int nthreads, int size_src, int k,
const Dtype* src, int size_dst, int i, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
dst[index * size_dst + i] = src[index * size_src + k];
}
}
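// Forward sampling kernel: one thread per output element converts the
// normalised sampling coordinates (px, py) to input pixel space and
// bilinearly interpolates U over the four neighbouring pixels.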
template <typename Dtype>
__global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* U, Dtype* V) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
const int V_offset = index;
V[V_offset] = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
const Dtype* pic = U + i * (C * H * W) + j * (H * W);
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
string prefix = "SpatialTransformerLayer::Forward_gpu::\t";
const Dtype* U = bottom[0]->gpu_data();
const Dtype* theta = bottom[1]->gpu_data();
const Dtype* output_grid_data = output_grid.gpu_data();
Dtype* full_theta_data = full_theta.mutable_gpu_data();
Dtype* full_gamma_data = full_gamma.mutable_cpu_data();
Dtype* input_grid_data = input_grid.mutable_gpu_data();
Dtype* V = top[0]->mutable_gpu_data();
caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data);
caffe_gpu_set(top[0]->count(), (Dtype)0, V);
// compute full_theta
int k = 0;
const int num_threads = N;
for(int i=0; i<6; ++i) {
if(is_pre_defined_theta[i]) {
set_value_to_constant<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(
num_threads, pre_defined_theta[i], 6, i, full_theta_data);
//std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i <<
// "/6 of full_theta_data" << std::endl;
} else {
copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads,
6 - pre_defined_count, k, theta, 6, i, full_theta_data);
//std::cout << "Copying " << k << "/" << 6 - pre_defined_count << " of theta to "
// << i << "/6 of full_theta_data" << std::endl;
++ k;
}
}
// For detransform, calculate gamma for de-transform
if(de_transform){
for(int i=0; i<N; i++){
double denom_ = full_gamma_data[6*i+0]*full_gamma_data[6*i+4] - full_gamma_data[6*i+1]*full_gamma_data[6*i+3];
if(denom_ == 0.0){
DLOG(INFO) << "Singular matrix encountered. Do identity mapping.";
full_gamma_data[6*i+0] = 1; full_gamma_data[6*i+1] = 0; full_gamma_data[6*i+2] = 0;
full_gamma_data[6*i+3] = 0; full_gamma_data[6*i+4] = 1; full_gamma_data[6*i+5] = 0;
}
else{
Dtype tmp_a = full_gamma_data[6*i+0];
Dtype tmp_b = full_gamma_data[6*i+1];
full_gamma_data[6*i+0] = full_gamma_data[6*i+4]/denom_; full_gamma_data[6*i+1] = full_gamma_data[6*i+3]/denom_;
full_gamma_data[6*i+3] = tmp_b/denom_; full_gamma_data[6*i+4] = tmp_a/denom_;
Dtype tmp_c = full_gamma_data[6*i+2];
Dtype tmp_d = full_gamma_data[6*i+5];
full_gamma_data[6*i+2] = -(full_gamma_data[6*i+0]*tmp_c + full_gamma_data[6*i+1]*tmp_d);
full_gamma_data[6*i+5] = -(full_gamma_data[6*i+3]*tmp_c + full_gamma_data[6*i+4]*tmp_d);
}
}
// compute out input_grid_data
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1.,
output_grid_data, full_gamma_data + 6 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 2) * i);
}
}
else{
// compute out input_grid_data
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1.,
output_grid_data, full_theta_data + 6 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 2) * i);
}
}
const int nthreads = N * C * output_H_ * output_W_;
SpatialTransformerForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V);
}
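// Backward kernel w.r.t. theta: per output element, back-propagate dV through
// the bilinear interpolation to get gradients w.r.t. the sampling coordinates,
// then chain them onto the six affine parameters in dTheta_tmp_diff.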
template <typename Dtype>
__global__ void SpatialTransformerBackwardGPU_dTheta(const int nthreads, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array,
Dtype* dTheta_tmp_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
Dtype delta_dpx = (Dtype)0.;
Dtype delta_dpy = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
const int dV_offset = index;
const Dtype dV = dV_array[dV_offset];
int m, n;
const Dtype* U = U_array + i * (C * H * W) + j * (H * W);
// left-bottom neighbor
m = floor(x); n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// left-top neighbor
m = floor(x); n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// right-bottom neighbor
m = floor(x) + 1; n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
// right-top neighbor
m = floor(x) + 1; n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
int idx = j * (output_H_ * output_W_) + s * output_W_ + t;
dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx;
dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1);
dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy;
}
}
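// Backward kernel w.r.t. U: scatter the output gradient dV back onto the
// input feature map with the same bilinear weights; atomic adds are required
// because several output samples can hit the same input pixel.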
template <typename Dtype>
__global__ void SpatialTransformerBackwardGPU_dU(const int nthreads, const int C,
const int W, const int H, const int output_H_, const int output_W_,
const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i;
const int row_idx = output_W_ * s + t;
const Dtype px = coordinates[row_idx * 2];
const Dtype py = coordinates[row_idx * 2 + 1];
const int V_offset = index;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
Dtype* pic = dU + i * (C * H * W) + j * (H * W);
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n));
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
string prefix = "SpatialTransformerLayer::Backward_GPU::\t";
const Dtype* dV = top[0]->gpu_diff();
const Dtype* input_grid_data = input_grid.gpu_data();
const Dtype* U = bottom[0]->gpu_data();
Dtype* dFull_theta = full_theta.mutable_gpu_diff();
Dtype* dTheta = bottom[1]->mutable_gpu_diff();
if(!de_transform){
Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff();
caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
SpatialTransformerBackwardGPU_dTheta<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dTheta_tmp_diff);
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta);
/*const Dtype* db_dFull_theta = full_theta.cpu_diff();
for(int i=0; i<full_theta.count(); ++i) {
std::cout << db_dFull_theta[i] << " ";
}
		std::cout<<std::endl;*/
	}
else{
Dtype* dFull_gamma = full_gamma.mutable_gpu_diff();
Dtype* dGamma = bottom[1]->mutable_gpu_diff();
Dtype* dGamma_tmp_diff = dGamma_tmp.mutable_gpu_diff();
Dtype* dTheta_1_2_data = dTheta_1_2.mutable_gpu_data();
caffe_gpu_set(dGamma_tmp.count(), (Dtype)0., dGamma_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
SpatialTransformerBackwardGPU_dTheta<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dGamma_tmp_diff);
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_gamma.count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dGamma_tmp_diff, all_ones_2_data, (Dtype)0., dFull_gamma);
/*const Dtype* db_dFull_theta = full_theta.cpu_diff();
for(int i=0; i<full_theta.count(); ++i) {
std::cout << db_dFull_theta[i] << " ";
}
		std::cout<<std::endl;*/
const Dtype* full_theta_data = full_theta.gpu_data();
const Dtype* full_gamma_data = full_gamma.gpu_data();
Dtype* dg_dt_data = dg_dt.mutable_gpu_data();
Dtype* dGamma_1_2_data = dGamma_1_2.mutable_gpu_data();
for(int i=0; i<N; i++){
double denom_ = full_theta_data[6*i+0]*full_theta_data[6*i+4] - full_theta_data[6*i+1]*full_theta_data[6*i+3];
if(denom_ == 0){
dFull_theta[6*i+0] = 0; dFull_theta[6*i+1] = 0; dFull_theta[6*i+2] = 0;
dFull_theta[6*i+3] = 0; dFull_theta[6*i+4] = 0; dFull_theta[6*i+5] = 0;
}
else{
//d_theta_3
dFull_theta[6*i+2] = -1 * (full_gamma_data[6*i+0]*dGamma[6*i+2] + full_gamma_data[6*i+1]*dGamma[6*i+5]);
dFull_theta[6*i+5] = -1 * (full_gamma_data[6*i+3]*dGamma[6*i+2] + full_gamma_data[6*i+4]*dGamma[6*i+5]);
//d_theta_1_2
dg_dt_data[0*4 + 0] = (-1)*full_theta_data[6*i + 4]*full_theta_data[6*i + 4]; dg_dt_data[0*4 + 1] = full_theta_data[6*i + 1]*full_theta_data[6*i + 4]; dg_dt_data[0*4 + 2] = full_theta_data[6*i + 3]*full_theta_data[6*i + 4]; dg_dt_data[0*4 + 3] = (-1)*full_theta_data[6*i + 3]*full_theta_data[6*i + 1];
dg_dt_data[1*4 + 0] = full_theta_data[6*i + 4]*full_theta_data[6*i + 3]; dg_dt_data[1*4 + 1] = (-1)*full_theta_data[6*i + 0]*full_theta_data[6*i + 4]; dg_dt_data[1*4 + 2] = (-1)*full_theta_data[6*i + 3]*full_theta_data[6*i + 3]; dg_dt_data[1*4 + 3] = full_theta_data[6*i + 0]*full_theta_data[6*i + 3];
dg_dt_data[2*4 + 0] = full_theta_data[6*i + 4]*full_theta_data[6*i + 1]; dg_dt_data[2*4 + 1] = (-1)*full_theta_data[6*i + 1]*full_theta_data[6*i + 1]; dg_dt_data[2*4 + 2] = (-1)*full_theta_data[6*i + 0]*full_theta_data[6*i + 4]; dg_dt_data[2*4 + 3] = full_theta_data[6*i + 0]*full_theta_data[6*i + 1];
dg_dt_data[3*4 + 0] = (-1)*full_theta_data[6*i + 3]*full_theta_data[6*i + 1]; dg_dt_data[3*4 + 1] = full_theta_data[6*i + 0]*full_theta_data[6*i + 1]; dg_dt_data[3*4 + 2] = full_theta_data[6*i + 3]*full_theta_data[6*i + 0]; dg_dt_data[3*4 + 3] = (-1)*full_theta_data[6*i + 0]*full_theta_data[6*i + 0];
dGamma_1_2_data[0] = dGamma[6*i + 0] - dGamma[6*i + 2]*full_theta_data[6*i + 2];
dGamma_1_2_data[1] = dGamma[6*i + 3] - dGamma[6*i + 5]*full_theta_data[6*i + 2];
dGamma_1_2_data[2] = dGamma[6*i + 1] - dGamma[6*i + 2]*full_theta_data[6*i + 5];
dGamma_1_2_data[3] = dGamma[6*i + 4] - dGamma[6*i + 5]*full_theta_data[6*i + 5];
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 4, 4, 1,
(Dtype)1., dg_dt_data, dGamma_1_2_data, (Dtype)0., dTheta_1_2_data);
dFull_theta[6*i+0] = dTheta_1_2_data[0]; dFull_theta[6*i+1] = dTheta_1_2_data[2];
dFull_theta[6*i+3] = dTheta_1_2_data[1]; dFull_theta[6*i+4] = dTheta_1_2_data[3];
}
}
}
int k = 0;
const int num_threads = N;
for(int i=0; i<6; ++i) {
if(!is_pre_defined_theta[i]) {
copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads,
6, i, dFull_theta, 6 - pre_defined_count, k, dTheta);
//std::cout << "Copying " << i << "/6 of dFull_theta to " << k << "/" <<
// 6 - pre_defined_count << " of dTheta" << std::endl;
++ k;
}
}
/*const Dtype* db_dtheta = bottom[1]->cpu_diff();
for(int i=0; i<bottom[1]->count(); ++i) {
std::cout << db_dtheta[i] << " ";
}
std::cout<<std::endl;*/
if(to_compute_dU_ or de_transform) {
Dtype* dU = bottom[0]->mutable_gpu_diff();
caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU);
const int nthreads = N * C * output_H_ * output_W_;
SpatialTransformerBackwardGPU_dU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe |
3d122b9e29bf25a37619ced6314e2256a01bf379.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file reduce_sum.cu
 *  @author Thomas Müller, NVIDIA
* @brief Wrapper around thrust's sum reduction to provide warning-free compilation.
*/
#include <tiny-cuda-nn/reduce_sum.h>
namespace tcnn {
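// block_reduce1 reduces `inout` in place, producing one partial sum per block: each
// block stages its slice in dynamic shared memory (blockDim.x * sizeof(float) bytes),
// folds it with a shared-memory tree reduction, finishes the last warp with the
// shuffle-based warp_reduce(), and thread 0 writes the block's partial sum to
// inout[blockIdx.x]. A follow-up pass is still needed to combine the per-block
// partials; reduce_sum_workspace_size() below reports one slot per launched block,
// matching this one-partial-per-block layout.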
__global__ void block_reduce1(
const uint32_t n_elements,
float* __restrict__ inout
) {
const uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
extern __shared__ float sdata[];
sdata[threadIdx.x] = i < n_elements ? inout[i] : 0;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x < 32) {
float val = sdata[threadIdx.x];
val = warp_reduce(val);
if (threadIdx.x == 0) {
inout[blockIdx.x] = val;
}
}
}
uint32_t reduce_sum_workspace_size(uint32_t n_elements) {
return n_blocks_linear(n_elements);
}
}
| 3d122b9e29bf25a37619ced6314e2256a01bf379.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file reduce_sum.cu
* @author Thomas Müller, NVIDIA
* @brief Wrapper around thrust's sum reduction to provide warning-free compilation.
*/
#include <tiny-cuda-nn/reduce_sum.h>
namespace tcnn {
__global__ void block_reduce1(
const uint32_t n_elements,
float* __restrict__ inout
) {
const uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
extern __shared__ float sdata[];
sdata[threadIdx.x] = i < n_elements ? inout[i] : 0;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x < 32) {
float val = sdata[threadIdx.x];
val = warp_reduce(val);
if (threadIdx.x == 0) {
inout[blockIdx.x] = val;
}
}
}
uint32_t reduce_sum_workspace_size(uint32_t n_elements) {
return n_blocks_linear(n_elements);
}
}
|
d4b64817ed05bbcad5640d22d7a8b16400c5ae52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions d -> s
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
__global__ void
dsiinertia_kernel(int n, magmaDouble_const_ptr dA, int ldda, int *dneig)
{
const int tx = threadIdx.x;
const int blk = blockIdx.x;
int peig = 0, neig = 0, zeig = 0;
__shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
    // Each thread computes its part of the inertia
for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
double diag = MAGMA_D_REAL(dA[i+i*ldda]);
if (diag > 0.0)
peig++;
else if (diag < 0.0)
neig++;
else
zeig++;
}
pe[tx] = peig;
ne[tx] = neig;
ze[tx] = zeig;
// The threads within a thread block sum their contributions to the inertia
magma_sum_reduce< NTHREADS >( tx, pe );
magma_sum_reduce< NTHREADS >( tx, ne );
magma_sum_reduce< NTHREADS >( tx, ze );
__syncthreads();
    // Atomically sum the contributions from all thread blocks (by thread 0)
if (tx == 0){
atomicAdd(&dneig[0], pe[0]);
atomicAdd(&dneig[1], ne[0]);
atomicAdd(&dneig[2], ze[0]);
}
}
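// Illustrative host-side usage of the wrapper defined below (sketch only; the variable
// names are placeholders and error checking is omitted):
//
//     int *dneig;
//     hipMalloc((void**)&dneig, 3 * sizeof(int));
//     magmablas_dsiinertia(n, dA, ldda, dneig, queue);
//     magma_queue_sync(queue);
//     int inertia[3];
//     hipMemcpy(inertia, dneig, 3 * sizeof(int), hipMemcpyDeviceToHost);
//     // inertia[0] / [1] / [2] = number of positive / negative / zero diagonal entries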
/***************************************************************************//**
Purpose
-------
magmablas_ddiinertia computes the inertia of a real diagonal matrix.
If matrix entries are real, magmablas_ddiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is real, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_dsiinertia(
magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
int *dneig,
magma_queue_t queue )
{
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( n < 0 ) {
info = -1;
} else if ( ldda < max(1, n) ) {
info = -3;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if (n == 0)
return info;
dim3 grid( NBLOCKS, 1, 1 );
dim3 threads( NTHREADS, 1, 1 );
    // Set inertia to zero
hipMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
hipLaunchKernelGGL(( dsiinertia_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dneig);
return info;
}
// end magmablas_ddiinertia
| d4b64817ed05bbcad5640d22d7a8b16400c5ae52.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions d -> s
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
__global__ void
dsiinertia_kernel(int n, magmaDouble_const_ptr dA, int ldda, int *dneig)
{
const int tx = threadIdx.x;
const int blk = blockIdx.x;
int peig = 0, neig = 0, zeig = 0;
__shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
    // Each thread computes its part of the inertia
for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
double diag = MAGMA_D_REAL(dA[i+i*ldda]);
if (diag > 0.0)
peig++;
else if (diag < 0.0)
neig++;
else
zeig++;
}
pe[tx] = peig;
ne[tx] = neig;
ze[tx] = zeig;
// The threads within a thread block sum their contributions to the inertia
magma_sum_reduce< NTHREADS >( tx, pe );
magma_sum_reduce< NTHREADS >( tx, ne );
magma_sum_reduce< NTHREADS >( tx, ze );
__syncthreads();
    // Atomically sum the contributions from all thread blocks (by thread 0)
if (tx == 0){
atomicAdd(&dneig[0], pe[0]);
atomicAdd(&dneig[1], ne[0]);
atomicAdd(&dneig[2], ze[0]);
}
}
/***************************************************************************//**
Purpose
-------
magmablas_ddiinertia computes the inertia of a real diagonal matrix.
If matrix entries are real, magmablas_ddiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is real, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_dsiinertia(
magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
int *dneig,
magma_queue_t queue )
{
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( n < 0 ) {
info = -1;
} else if ( ldda < max(1, n) ) {
info = -3;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if (n == 0)
return info;
dim3 grid( NBLOCKS, 1, 1 );
dim3 threads( NTHREADS, 1, 1 );
    // Set inertia to zero
cudaMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
dsiinertia_kernel<<<grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dneig);
return info;
}
// end magmablas_ddiinertia
|
654b1d66510e0821e9fc299a3635b1c46343f4e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_cooperative_groups.h>
#include <thrust/gather.h>
#include "copying.hpp"
#include "cudf.h"
#include "gather.hpp"
#include "rmm/thrust_rmm_allocator.h"
#include "utilities/cudf_utils.h"
#include "utilities/type_dispatcher.hpp"
#include <bitmask/legacy_bitmask.hpp>
/**
* @brief Operations for copying from one column to another
* @file copying_ops.cu
*/
namespace {
/**---------------------------------------------------------------------------*
* @brief Function object to check if an index is within the bounds [begin,
* end).
*
*---------------------------------------------------------------------------**/
struct bounds_checker {
gdf_index_type const begin;
gdf_index_type const end;
__device__ bounds_checker(gdf_index_type begin_, gdf_index_type end_)
: begin{begin_}, end{end_} {}
__device__ __forceinline__ bool operator()(gdf_index_type const index) {
return ((index >= begin) && (index < end));
}
};
/**---------------------------------------------------------------------------*
* @brief Conditionally gathers the bits of a validity bitmask.
*
* Gathers the bits of a validity bitmask according to a gather map.
* If `pred(stencil[i])` evaluates to true, then bit `i` in `destination_mask`
* will equal bit `gather_map[i]` from the `source_mask`.
*
* If `pred(stencil[i])` evaluates to false, then bit `i` in `destination_mask`
* will be set to 0.
*
* If any value appears in `gather_map` more than once, the result is undefined.
*
* If any of the range [source_mask, source_mask + num_source_rows) overlaps
* [destination_mask, destination_mask + num_destination_rows), the result is
* undefined.
*
* @tparam T The type of the stencil array
* @tparam P The type of the predicate
* @param[in] source_mask The mask whose bits will be gathered
* @param[in] num_source_rows The number of bits in the source_mask
* @param[out] destination_mask The output after gathering the input
* @param[in] num_destination_rows The number of bits in the
* destination_mask
* @param[in] gather_map The map that indicates where elements from the
* input will be gathered to in the output. Length must be equal to
* `num_destination_rows`.
* @param[in] stencil An array of values that will be evaluated by the
* predicate. Length must be equal to `num_destination_rows`.
* @param[in] pred Unary predicate applied to the stencil values
*---------------------------------------------------------------------------**/
template <typename T, typename P>
__global__ void gather_bitmask_if_kernel(
gdf_valid_type const* const __restrict__ source_mask,
gdf_size_type const num_source_rows, gdf_valid_type* const destination_mask,
gdf_size_type const num_destination_rows, gdf_index_type const* gather_map,
T const* stencil, P pred) {
using MaskType = uint32_t;
constexpr uint32_t BITS_PER_MASK{sizeof(MaskType) * 8};
// TODO: Update to use new bit_mask_t
MaskType* const __restrict__ destination_mask32 =
reinterpret_cast<MaskType*>(destination_mask);
gdf_index_type destination_row = threadIdx.x + blockIdx.x * blockDim.x;
auto active_threads =
__ballot_sync(0xffffffff, destination_row < num_destination_rows);
while (destination_row < num_destination_rows) {
bool source_bit_is_valid{false};
bool const predicate_is_true{pred(stencil[destination_row])};
if (predicate_is_true) {
// If the predicate for `destination_row` is false, it's valid for
// `gather_map[destination_row]` to be out of bounds,
// therefore, only use it if the predicate evaluates to true
source_bit_is_valid =
gdf_is_valid(source_mask, gather_map[destination_row]);
}
bool const destination_bit_is_valid{
gdf_is_valid(destination_mask, destination_row)};
// Use ballot to find all valid bits in this warp and create the output
// bitmask element
// If the predicate is false, and the destination bit was valid, don't
// overwrite it
MaskType const result_mask =
__ballot_sync(active_threads,
(predicate_is_true and source_bit_is_valid) or
(not predicate_is_true and destination_bit_is_valid));
gdf_index_type const output_element = destination_row / BITS_PER_MASK;
// Only one thread writes output
if (0 == threadIdx.x % warpSize) {
destination_mask32[output_element] = result_mask;
}
destination_row += blockDim.x * gridDim.x;
active_threads =
__ballot_sync(active_threads, destination_row < num_destination_rows);
}
}
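/**---------------------------------------------------------------------------*
 * @brief Sequential reference of the gather semantics above (illustrative sketch
 * added for clarity; not part of the original kernels). It restates, on plain bool
 * arrays, what gather_bitmask_if_kernel computes per destination row: take the
 * source validity at gather_map[i] when pred(stencil[i]) holds, otherwise keep the
 * existing destination validity. All names in this helper are placeholders.
 *---------------------------------------------------------------------------**/
template <typename T, typename P>
void gather_validity_reference(bool const* source_valid, bool* destination_valid,
                               gdf_size_type num_destination_rows,
                               gdf_index_type const* gather_map, T const* stencil,
                               P pred) {
  for (gdf_size_type i = 0; i < num_destination_rows; ++i) {
    if (pred(stencil[i])) {
      destination_valid[i] = source_valid[gather_map[i]];
    }
    // else: leave destination_valid[i] unchanged, mirroring the kernel's ballot logic
  }
}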
/**---------------------------------------------------------------------------*
* @brief Gathers the bits of a validity bitmask.
*
* Gathers the bits from the source bitmask into the destination bitmask
* according to a `gather_map` such that bit `i` in `destination_mask` will be
* equal to bit `gather_map[i]` from `source_bitmask`.
*
* Undefined behavior results if any value in `gather_map` is outside the range
* [0, num_source_rows).
*
* If any value appears in `gather_map` more than once, the result is undefined.
*
* If any of the range [source_mask, source_mask + num_source_rows) overlaps
* [destination_mask, destination_mask + num_destination_rows), the result is
* undefined.
*
* @param[in] source_mask The mask whose bits will be gathered
* @param[in] num_source_rows The number of bits in the source_mask
* @param[out] destination_mask The output after gathering the input
* @param[in] num_destination_rows The number of bits in the
* destination_mask
* @param[in] gather_map The map that indicates where elements from the
* input will be gathered to in the output. Length must be equal to
* `num_destination_rows`.
*---------------------------------------------------------------------------**/
__global__ void gather_bitmask_kernel(
gdf_valid_type const* const __restrict__ source_mask,
gdf_size_type const num_source_rows, gdf_valid_type* const destination_mask,
gdf_size_type const num_destination_rows,
gdf_index_type const* __restrict__ gather_map) {
using MaskType = uint32_t;
constexpr uint32_t BITS_PER_MASK{sizeof(MaskType) * 8};
  // Cast the bitmask to a 4B type
// TODO: Update to use new bit_mask_t
MaskType* const __restrict__ destination_mask32 =
reinterpret_cast<MaskType*>(destination_mask);
gdf_index_type destination_row = threadIdx.x + blockIdx.x * blockDim.x;
auto active_threads =
__ballot_sync(0xffffffff, destination_row < num_destination_rows);
while (destination_row < num_destination_rows) {
bool const source_bit_is_valid{
gdf_is_valid(source_mask, gather_map[destination_row])};
// Use ballot to find all valid bits in this warp and create the output
// bitmask element
MaskType const result_mask{
__ballot_sync(active_threads, source_bit_is_valid)};
gdf_index_type const output_element = destination_row / BITS_PER_MASK;
// Only one thread writes output
if (0 == threadIdx.x % warpSize) {
destination_mask32[output_element] = result_mask;
}
destination_row += blockDim.x * gridDim.x;
active_threads =
__ballot_sync(active_threads, destination_row < num_destination_rows);
}
}
/**---------------------------------------------------------------------------*
* @brief Gathers the bits from a source bitmask into a destination bitmask
* based on a map.
*
* Gathers the bits from the source bitmask into the destination bitmask
* according to a `gather_map` such that bit `i` in `destination_mask` will be
* equal to bit `gather_map[i]` from `source_bitmask`.
*
* Optionally performs bounds checking on the values of the `gather_map` that
* ignores values outside [0, num_source_rows). It is undefined behavior if a
* value in `gather_map` is outside these bounds and bounds checking is not
* enabled.
*
* If the same value appears more than once in `gather_map`, the result is
* undefined.
*
* @param[in] source_mask The mask from which bits will be gathered
* @param[in] num_source_rows The number of bits in the source_mask
* @param[in,out] destination_mask The mask to which bits will be gathered.
* Buffer must be preallocated with sufficient storage to hold
* `num_destination_rows` bits.
 * @param[in] num_destination_rows The number of bits in the destination_mask
* @param[in] gather_map An array of indices that maps the bits in the source
* bitmask to bits in the destination bitmask. The number of elements in the
* `gather_map` must be equal to `num_destination_rows`.
* @param[in] check_bounds Optionally perform bounds checking of values in
* `gather_map`
* @param[in] stream Optional CUDA stream on which to execute kernels
*---------------------------------------------------------------------------**/
void gather_bitmask(gdf_valid_type const* source_mask,
gdf_size_type num_source_rows,
gdf_valid_type* destination_mask,
gdf_size_type num_destination_rows,
gdf_index_type const gather_map[],
bool check_bounds = false, hipStream_t stream = 0) {
CUDF_EXPECTS(destination_mask != nullptr, "Missing valid buffer allocation");
constexpr gdf_size_type BLOCK_SIZE{256};
const gdf_size_type gather_grid_size =
(num_destination_rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
gdf_valid_type* output_bitmask{destination_mask};
// Allocate a temporary results buffer if gathering in-place
bool const in_place{source_mask == destination_mask};
rmm::device_vector<gdf_valid_type> temp_bitmask;
if (in_place) {
temp_bitmask.resize(gdf_valid_allocation_size(num_destination_rows));
output_bitmask = temp_bitmask.data().get();
}
if (check_bounds) {
hipLaunchKernelGGL(( gather_bitmask_if_kernel), dim3(gather_grid_size), dim3(BLOCK_SIZE), 0, stream,
source_mask, num_source_rows, output_bitmask, num_destination_rows,
gather_map, gather_map, bounds_checker{0, num_source_rows});
} else {
hipLaunchKernelGGL(( gather_bitmask_kernel), dim3(gather_grid_size), dim3(BLOCK_SIZE), 0, stream,
source_mask, num_source_rows, output_bitmask, num_destination_rows,
gather_map);
}
CHECK_STREAM(stream);
if (in_place) {
thrust::copy(rmm::exec_policy(stream)->on(stream), temp_bitmask.begin(),
temp_bitmask.end(), destination_mask);
}
CHECK_STREAM(stream);
}
/**---------------------------------------------------------------------------*
* @brief Function object for gathering a type-erased
* gdf_column. To be used with the cudf::type_dispatcher.
*
*---------------------------------------------------------------------------**/
struct column_gatherer {
/**---------------------------------------------------------------------------*
* @brief Type-dispatched function to gather from one column to another based
* on a `gather_map`.
*
* @tparam ColumnType Dispatched type for the column being gathered
* @param source_column The column to gather from
* @param gather_map Array of indices that maps source elements to destination
* elements
* @param destination_column The column to gather into
* @param check_bounds Optionally perform bounds checking on the values of
* `gather_map`
* @param stream Optional CUDA stream on which to execute kernels
*---------------------------------------------------------------------------**/
template <typename ColumnType>
void operator()(gdf_column const* source_column,
gdf_index_type const gather_map[],
gdf_column* destination_column, bool check_bounds = false,
hipStream_t stream = 0) {
ColumnType const* const source_data{
static_cast<ColumnType const*>(source_column->data)};
ColumnType* destination_data{
static_cast<ColumnType*>(destination_column->data)};
gdf_size_type const num_destination_rows{destination_column->size};
// If gathering in-place, allocate temporary buffers to hold intermediate
// results
bool const in_place{source_data == destination_data};
rmm::device_vector<ColumnType> temp_destination;
if (in_place) {
temp_destination.resize(num_destination_rows);
destination_data = temp_destination.data().get();
}
if (check_bounds) {
thrust::gather_if(rmm::exec_policy(stream)->on(stream), gather_map,
gather_map + num_destination_rows, gather_map,
source_data, destination_data,
bounds_checker{0, source_column->size});
} else {
thrust::gather(rmm::exec_policy(stream)->on(stream), gather_map,
gather_map + num_destination_rows, source_data,
destination_data);
}
// Copy temporary buffers used for in-place gather to destination column
if (in_place) {
thrust::copy(rmm::exec_policy(stream)->on(stream),
temp_destination.begin(), temp_destination.end(),
static_cast<ColumnType*>(destination_column->data));
}
if (destination_column->valid != nullptr) {
gather_bitmask(source_column->valid, source_column->size,
destination_column->valid, num_destination_rows,
gather_map, check_bounds, stream);
// TODO compute the null count in the gather_bitmask kernels
gdf_error gdf_status = set_null_count(destination_column);
CUDF_EXPECTS(GDF_SUCCESS == gdf_status, "set_null_count failed");
}
CHECK_STREAM(stream);
}
};
} // namespace
namespace cudf {
namespace detail {
void gather(table const* source_table, gdf_index_type const gather_map[],
table* destination_table, bool check_bounds, hipStream_t stream) {
CUDF_EXPECTS(nullptr != source_table, "source table is null");
CUDF_EXPECTS(nullptr != destination_table, "destination table is null");
// If the destination is empty, return immediately as there is nothing to
// gather
if (0 == destination_table->num_rows()) {
return;
}
CUDF_EXPECTS(nullptr != gather_map, "gather_map is null");
CUDF_EXPECTS(source_table->num_columns() == destination_table->num_columns(),
"Mismatched number of columns");
auto gather_column = [gather_map, check_bounds, stream](
gdf_column const* source, gdf_column* destination) {
CUDF_EXPECTS(source->dtype == destination->dtype, "Column type mismatch");
// If the source column has a valid buffer, the destination column must
// also have one
bool const source_has_nulls{source->valid != nullptr};
bool const dest_has_nulls{destination->valid != nullptr};
CUDF_EXPECTS((source_has_nulls && dest_has_nulls) || (not source_has_nulls),
"Missing destination validity buffer");
// TODO: Each column could be gathered on a separate stream
cudf::type_dispatcher(source->dtype, column_gatherer{}, source, gather_map,
destination, check_bounds, stream);
return destination;
};
// Gather columns one-by-one
std::transform(source_table->begin(), source_table->end(),
destination_table->begin(), destination_table->begin(),
gather_column);
}
} // namespace detail
void gather(table const* source_table, gdf_index_type const gather_map[],
table* destination_table) {
detail::gather(source_table, gather_map, destination_table);
}
} // namespace cudf
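/**---------------------------------------------------------------------------*
 * @brief Illustrative usage sketch (not part of the original libcudf source): one
 * plausible way to drive the public cudf::gather entry point above. The destination
 * table must already be allocated with the same column types as the source and one
 * row per gather_map entry; `d_gather_map` is a device pointer and all names here
 * are placeholders.
 *---------------------------------------------------------------------------**/
inline void example_gather_usage(cudf::table const* source,
                                 gdf_index_type const* d_gather_map,
                                 cudf::table* destination) {
  // No bounds checking is performed on this path; see detail::gather for that option.
  cudf::gather(source, d_gather_map, destination);
}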
| 654b1d66510e0821e9fc299a3635b1c46343f4e9.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cooperative_groups.h>
#include <thrust/gather.h>
#include "copying.hpp"
#include "cudf.h"
#include "gather.hpp"
#include "rmm/thrust_rmm_allocator.h"
#include "utilities/cudf_utils.h"
#include "utilities/type_dispatcher.hpp"
#include <bitmask/legacy_bitmask.hpp>
/**
* @brief Operations for copying from one column to another
* @file copying_ops.cu
*/
namespace {
/**---------------------------------------------------------------------------*
* @brief Function object to check if an index is within the bounds [begin,
* end).
*
*---------------------------------------------------------------------------**/
struct bounds_checker {
gdf_index_type const begin;
gdf_index_type const end;
__device__ bounds_checker(gdf_index_type begin_, gdf_index_type end_)
: begin{begin_}, end{end_} {}
__device__ __forceinline__ bool operator()(gdf_index_type const index) {
return ((index >= begin) && (index < end));
}
};
/**---------------------------------------------------------------------------*
* @brief Conditionally gathers the bits of a validity bitmask.
*
* Gathers the bits of a validity bitmask according to a gather map.
* If `pred(stencil[i])` evaluates to true, then bit `i` in `destination_mask`
* will equal bit `gather_map[i]` from the `source_mask`.
*
* If `pred(stencil[i])` evaluates to false, then bit `i` in `destination_mask`
* will be set to 0.
*
* If any value appears in `gather_map` more than once, the result is undefined.
*
* If any of the range [source_mask, source_mask + num_source_rows) overlaps
* [destination_mask, destination_mask + num_destination_rows), the result is
* undefined.
*
* @tparam T The type of the stencil array
* @tparam P The type of the predicate
* @param[in] source_mask The mask whose bits will be gathered
* @param[in] num_source_rows The number of bits in the source_mask
* @param[out] destination_mask The output after gathering the input
* @param[in] num_destination_rows The number of bits in the
* destination_mask
* @param[in] gather_map The map that indicates where elements from the
* input will be gathered to in the output. Length must be equal to
* `num_destination_rows`.
* @param[in] stencil An array of values that will be evaluated by the
* predicate. Length must be equal to `num_destination_rows`.
* @param[in] pred Unary predicate applied to the stencil values
*---------------------------------------------------------------------------**/
template <typename T, typename P>
__global__ void gather_bitmask_if_kernel(
gdf_valid_type const* const __restrict__ source_mask,
gdf_size_type const num_source_rows, gdf_valid_type* const destination_mask,
gdf_size_type const num_destination_rows, gdf_index_type const* gather_map,
T const* stencil, P pred) {
using MaskType = uint32_t;
constexpr uint32_t BITS_PER_MASK{sizeof(MaskType) * 8};
// TODO: Update to use new bit_mask_t
MaskType* const __restrict__ destination_mask32 =
reinterpret_cast<MaskType*>(destination_mask);
gdf_index_type destination_row = threadIdx.x + blockIdx.x * blockDim.x;
auto active_threads =
__ballot_sync(0xffffffff, destination_row < num_destination_rows);
while (destination_row < num_destination_rows) {
bool source_bit_is_valid{false};
bool const predicate_is_true{pred(stencil[destination_row])};
if (predicate_is_true) {
// If the predicate for `destination_row` is false, it's valid for
// `gather_map[destination_row]` to be out of bounds,
// therefore, only use it if the predicate evaluates to true
source_bit_is_valid =
gdf_is_valid(source_mask, gather_map[destination_row]);
}
bool const destination_bit_is_valid{
gdf_is_valid(destination_mask, destination_row)};
// Use ballot to find all valid bits in this warp and create the output
// bitmask element
// If the predicate is false, and the destination bit was valid, don't
// overwrite it
MaskType const result_mask =
__ballot_sync(active_threads,
(predicate_is_true and source_bit_is_valid) or
(not predicate_is_true and destination_bit_is_valid));
gdf_index_type const output_element = destination_row / BITS_PER_MASK;
// Only one thread writes output
if (0 == threadIdx.x % warpSize) {
destination_mask32[output_element] = result_mask;
}
destination_row += blockDim.x * gridDim.x;
active_threads =
__ballot_sync(active_threads, destination_row < num_destination_rows);
}
}
/**---------------------------------------------------------------------------*
* @brief Gathers the bits of a validity bitmask.
*
* Gathers the bits from the source bitmask into the destination bitmask
* according to a `gather_map` such that bit `i` in `destination_mask` will be
* equal to bit `gather_map[i]` from `source_bitmask`.
*
* Undefined behavior results if any value in `gather_map` is outside the range
* [0, num_source_rows).
*
* If any value appears in `gather_map` more than once, the result is undefined.
*
* If any of the range [source_mask, source_mask + num_source_rows) overlaps
* [destination_mask, destination_mask + num_destination_rows), the result is
* undefined.
*
* @param[in] source_mask The mask whose bits will be gathered
* @param[in] num_source_rows The number of bits in the source_mask
* @param[out] destination_mask The output after gathering the input
* @param[in] num_destination_rows The number of bits in the
* destination_mask
* @param[in] gather_map The map that indicates where elements from the
* input will be gathered to in the output. Length must be equal to
* `num_destination_rows`.
*---------------------------------------------------------------------------**/
__global__ void gather_bitmask_kernel(
gdf_valid_type const* const __restrict__ source_mask,
gdf_size_type const num_source_rows, gdf_valid_type* const destination_mask,
gdf_size_type const num_destination_rows,
gdf_index_type const* __restrict__ gather_map) {
using MaskType = uint32_t;
constexpr uint32_t BITS_PER_MASK{sizeof(MaskType) * 8};
  // Cast the bitmask to a 4B type
// TODO: Update to use new bit_mask_t
MaskType* const __restrict__ destination_mask32 =
reinterpret_cast<MaskType*>(destination_mask);
gdf_index_type destination_row = threadIdx.x + blockIdx.x * blockDim.x;
auto active_threads =
__ballot_sync(0xffffffff, destination_row < num_destination_rows);
while (destination_row < num_destination_rows) {
bool const source_bit_is_valid{
gdf_is_valid(source_mask, gather_map[destination_row])};
// Use ballot to find all valid bits in this warp and create the output
// bitmask element
MaskType const result_mask{
__ballot_sync(active_threads, source_bit_is_valid)};
gdf_index_type const output_element = destination_row / BITS_PER_MASK;
// Only one thread writes output
if (0 == threadIdx.x % warpSize) {
destination_mask32[output_element] = result_mask;
}
destination_row += blockDim.x * gridDim.x;
active_threads =
__ballot_sync(active_threads, destination_row < num_destination_rows);
}
}
/**---------------------------------------------------------------------------*
* @brief Gathers the bits from a source bitmask into a destination bitmask
* based on a map.
*
* Gathers the bits from the source bitmask into the destination bitmask
* according to a `gather_map` such that bit `i` in `destination_mask` will be
* equal to bit `gather_map[i]` from `source_bitmask`.
*
* Optionally performs bounds checking on the values of the `gather_map` that
* ignores values outside [0, num_source_rows). It is undefined behavior if a
* value in `gather_map` is outside these bounds and bounds checking is not
* enabled.
*
* If the same value appears more than once in `gather_map`, the result is
* undefined.
*
* @param[in] source_mask The mask from which bits will be gathered
* @param[in] num_source_rows The number of bits in the source_mask
* @param[in,out] destination_mask The mask to which bits will be gathered.
* Buffer must be preallocated with sufficient storage to hold
* `num_destination_rows` bits.
 * @param[in] num_destination_rows The number of bits in the destination_mask
* @param[in] gather_map An array of indices that maps the bits in the source
* bitmask to bits in the destination bitmask. The number of elements in the
* `gather_map` must be equal to `num_destination_rows`.
* @param[in] check_bounds Optionally perform bounds checking of values in
* `gather_map`
* @param[in] stream Optional CUDA stream on which to execute kernels
*---------------------------------------------------------------------------**/
void gather_bitmask(gdf_valid_type const* source_mask,
gdf_size_type num_source_rows,
gdf_valid_type* destination_mask,
gdf_size_type num_destination_rows,
gdf_index_type const gather_map[],
bool check_bounds = false, cudaStream_t stream = 0) {
CUDF_EXPECTS(destination_mask != nullptr, "Missing valid buffer allocation");
constexpr gdf_size_type BLOCK_SIZE{256};
const gdf_size_type gather_grid_size =
(num_destination_rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
gdf_valid_type* output_bitmask{destination_mask};
// Allocate a temporary results buffer if gathering in-place
bool const in_place{source_mask == destination_mask};
rmm::device_vector<gdf_valid_type> temp_bitmask;
if (in_place) {
temp_bitmask.resize(gdf_valid_allocation_size(num_destination_rows));
output_bitmask = temp_bitmask.data().get();
}
if (check_bounds) {
gather_bitmask_if_kernel<<<gather_grid_size, BLOCK_SIZE, 0, stream>>>(
source_mask, num_source_rows, output_bitmask, num_destination_rows,
gather_map, gather_map, bounds_checker{0, num_source_rows});
} else {
gather_bitmask_kernel<<<gather_grid_size, BLOCK_SIZE, 0, stream>>>(
source_mask, num_source_rows, output_bitmask, num_destination_rows,
gather_map);
}
CHECK_STREAM(stream);
if (in_place) {
thrust::copy(rmm::exec_policy(stream)->on(stream), temp_bitmask.begin(),
temp_bitmask.end(), destination_mask);
}
CHECK_STREAM(stream);
}
/**---------------------------------------------------------------------------*
* @brief Function object for gathering a type-erased
* gdf_column. To be used with the cudf::type_dispatcher.
*
*---------------------------------------------------------------------------**/
struct column_gatherer {
/**---------------------------------------------------------------------------*
* @brief Type-dispatched function to gather from one column to another based
* on a `gather_map`.
*
* @tparam ColumnType Dispatched type for the column being gathered
* @param source_column The column to gather from
* @param gather_map Array of indices that maps source elements to destination
* elements
* @param destination_column The column to gather into
* @param check_bounds Optionally perform bounds checking on the values of
* `gather_map`
* @param stream Optional CUDA stream on which to execute kernels
*---------------------------------------------------------------------------**/
template <typename ColumnType>
void operator()(gdf_column const* source_column,
gdf_index_type const gather_map[],
gdf_column* destination_column, bool check_bounds = false,
cudaStream_t stream = 0) {
ColumnType const* const source_data{
static_cast<ColumnType const*>(source_column->data)};
ColumnType* destination_data{
static_cast<ColumnType*>(destination_column->data)};
gdf_size_type const num_destination_rows{destination_column->size};
// If gathering in-place, allocate temporary buffers to hold intermediate
// results
bool const in_place{source_data == destination_data};
rmm::device_vector<ColumnType> temp_destination;
if (in_place) {
temp_destination.resize(num_destination_rows);
destination_data = temp_destination.data().get();
}
if (check_bounds) {
thrust::gather_if(rmm::exec_policy(stream)->on(stream), gather_map,
gather_map + num_destination_rows, gather_map,
source_data, destination_data,
bounds_checker{0, source_column->size});
} else {
thrust::gather(rmm::exec_policy(stream)->on(stream), gather_map,
gather_map + num_destination_rows, source_data,
destination_data);
}
// Copy temporary buffers used for in-place gather to destination column
if (in_place) {
thrust::copy(rmm::exec_policy(stream)->on(stream),
temp_destination.begin(), temp_destination.end(),
static_cast<ColumnType*>(destination_column->data));
}
if (destination_column->valid != nullptr) {
gather_bitmask(source_column->valid, source_column->size,
destination_column->valid, num_destination_rows,
gather_map, check_bounds, stream);
// TODO compute the null count in the gather_bitmask kernels
gdf_error gdf_status = set_null_count(destination_column);
CUDF_EXPECTS(GDF_SUCCESS == gdf_status, "set_null_count failed");
}
CHECK_STREAM(stream);
}
};
} // namespace
namespace cudf {
namespace detail {
void gather(table const* source_table, gdf_index_type const gather_map[],
table* destination_table, bool check_bounds, cudaStream_t stream) {
CUDF_EXPECTS(nullptr != source_table, "source table is null");
CUDF_EXPECTS(nullptr != destination_table, "destination table is null");
// If the destination is empty, return immediately as there is nothing to
// gather
if (0 == destination_table->num_rows()) {
return;
}
CUDF_EXPECTS(nullptr != gather_map, "gather_map is null");
CUDF_EXPECTS(source_table->num_columns() == destination_table->num_columns(),
"Mismatched number of columns");
auto gather_column = [gather_map, check_bounds, stream](
gdf_column const* source, gdf_column* destination) {
CUDF_EXPECTS(source->dtype == destination->dtype, "Column type mismatch");
// If the source column has a valid buffer, the destination column must
// also have one
bool const source_has_nulls{source->valid != nullptr};
bool const dest_has_nulls{destination->valid != nullptr};
CUDF_EXPECTS((source_has_nulls && dest_has_nulls) || (not source_has_nulls),
"Missing destination validity buffer");
// TODO: Each column could be gathered on a separate stream
cudf::type_dispatcher(source->dtype, column_gatherer{}, source, gather_map,
destination, check_bounds, stream);
return destination;
};
// Gather columns one-by-one
std::transform(source_table->begin(), source_table->end(),
destination_table->begin(), destination_table->begin(),
gather_column);
}
} // namespace detail
void gather(table const* source_table, gdf_index_type const gather_map[],
table* destination_table) {
detail::gather(source_table, gather_map, destination_table);
}
} // namespace cudf
|
77bcca15d1244ad58ccc8c7d95c2a813fd083966.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| 77bcca15d1244ad58ccc8c7d95c2a813fd083966.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
c65426da68a2835ed747b1a3c7945ce95cadd8a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helpers.h"
__constant__ float ckernel[81];
__global__ void conv_cuda(float *input, float *output, int width, int height,
float *kernel, int channels, int k_width,
int kernels) {
int k = blockIdx.z;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
int output_idx = i * width * kernels + j * kernels + k;
extern __shared__ float sdata[];
int smem_2d_size = (blockDim.x + 2 * k_width) * (blockDim.y + 2 * k_width);
if (threadIdx.y < k_width) {
// Top Left
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
// Top Right
int smem_x1 = smem_x + blockDim.x + k_width;
int smem_y1 = smem_y;
int gmem_x1 = gmem_x + blockDim.x + k_width;
int gmem_y1 = gmem_y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] = (gmem_x < 0 || gmem_y < 0) ? 0 : input[gmem_index];
// Top right
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
sdata[smem_index1] =
(gmem_x1 >= width || gmem_y1 < 0) ? 0 : input[gmem_index1];
}
}
// Top Overhang
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
// Left Overhang
int smem_x1 = threadIdx.y;
int smem_y1 = threadIdx.x + k_width;
int gmem_x1 = blockIdx.x * blockDim.x + threadIdx.y - k_width;
int gmem_y1 = blockIdx.y * blockDim.y + threadIdx.x;
// // Right Overhang
// int smem_x2 = blockDim.x + k_width + smem_x1;
// int smem_y2 = smem_y1;
// int gmem_x2 = gmem_x1 + blockDim.x + k_width ;
// int gmem_y2 = gmem_y1;
// // Bottom Overhang
// int smem_x3 = smem_x;
// int smem_y3 = blockDim.y + k_width + smem_y;
// int gmem_x3 = gmem_x;
// int gmem_y3 = gmem_y + blockDim.y + k_width;
for (int c = 0; c < channels; c++) {
// Assign Top values
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] = (gmem_y < 0) ? 0 : input[gmem_index];
// Assign Left value
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
      // The halo column index here is gmem_x1, which can fall left of the image
      sdata[smem_index1] = (gmem_x1 < 0) ? 0 : input[gmem_index1];
// //Assign Right
// int gmem_index2 = gmem_x2 * channels + gmem_y2 * width * channels + c;
// int smem_index2 =
// (smem_y2 * (blockDim.x + 2 * k_width) + smem_x2) + c *
// smem_2d_size;
// sdata[smem_index2] = (gmem_x3 >= width) ? 0 : input[gmem_index2];
// //Assign Bottom
// int gmem_index3 = gmem_x3 * channels + gmem_y3 * width * channels + c;
// int smem_index3 =
// (smem_y3 * (blockDim.x + 2 * k_width) + smem_x3) + c *
// smem_2d_size;
// sdata[smem_index3] = (gmem_y3 >= height) ? 0 : input[gmem_index3];
// //Assign Bottom
}
}
// Bottom Left
if (threadIdx.x < k_width && threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
int smem_x1 = smem_x + blockDim.x + k_width;
int smem_y1 = smem_y;
int gmem_x1 = gmem_x + blockDim.x + k_width;
int gmem_y1 = gmem_y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] =
(gmem_x < 0 || gmem_y >= height) ? 0 : input[gmem_index];
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
sdata[smem_index1] =
(gmem_x1 >= width || gmem_y1 >= height) ? 0 : input[gmem_index1];
}
}
// // Bottom Right
// if (threadIdx.x >= blockDim.x - k_width &&
// threadIdx.y >= blockDim.y - k_width) {
// int smem_x = threadIdx.x + 2 * k_width;
// int smem_y = threadIdx.y + 2 * k_width;
// int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
// int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
// for (int c = 0; c < channels; c++) {
// int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
// int smem_index =
// (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
// sdata[smem_index] =
// (gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
// }
// }
// Bottom
if (threadIdx.y >= blockDim.y - k_width) {
// Indexes for bottom padding
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
// Indexes for right side
int smem_x1 = threadIdx.y + 2 * k_width;
int smem_y1 = threadIdx.x + k_width;
int gmem_x1 = blockIdx.x * blockDim.x + threadIdx.y + k_width;
int gmem_y1 = blockIdx.y * blockDim.y + threadIdx.x;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] = (gmem_y >= height) ? 0 : input[gmem_index];
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
sdata[smem_index1] = (gmem_x1 >= width) ? 0 : input[gmem_index1];
}
}
// Copy the block data
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
__syncthreads();
if (i >= height || j >= width) {
return;
}
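  // Accumulate the output pixel for feature map k: for each input channel,
  // sweep the (2*k_width+1)^2 window over the shared tile and weight each
  // sample by the matching filter coefficient.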
float tmp_output = 0;
for (int c = 0; c < channels; c++) {
for (int k_i = 0; k_i <= 2 * k_width; k_i++) {
for (int k_j = 0; k_j <= 2 * k_width; k_j++) {
smem_x = threadIdx.x + k_j;
smem_y = threadIdx.y + k_i;
int smem_index = c + smem_x * channels +
smem_y * channels * (blockDim.x + 2 * k_width);
int kernel_index =
k * channels * (2 * k_width + 1) * (2 * k_width + 1) +
c * (2 * k_width + 1) * (2 * k_width + 1) +
k_i * (2 * k_width + 1) + k_j;
tmp_output += sdata[smem_index] * kernel[kernel_index];
}
}
}
output[output_idx] = tmp_output;
return;
}
int main(int argc, char *argv[]) {
char *outputfile = (char *)"cuda_out_reorder.png";
// Check input image name
if (argc < 2) {
std::cout << "No file input" << std::endl;
return 0;
}
//
// Check if the filename is valid
char *filename = argv[1];
std::cout << argv[1] << std::endl;
// Load Image
cv::Mat image;
image = load_image(filename);
if (image.empty()) {
std::cout << "File not exist" << std::endl;
return 0;
}
//==================================
// Define I/O sizes
//==================================
int padding = 1;
int channels = 3;
int height = image.rows;
int width = image.cols;
int kernels = 3;
std::cout << "Image dims (HxW)is " << height << "x" << width << std::endl;
int height_padded = height + 2 * padding;
int width_padded = width + 2 * padding;
int input_bytes = channels * height * width * sizeof(float);
int output_bytes = channels * height * width * sizeof(float);
std::cout << "Padded dims is " << height_padded << "x" << width_padded
<< std::endl;
float *h_input = (float *)image.data;
// float *h_output = new float[output_bytes];
float *h_output;
h_output = (float *)malloc(output_bytes);
float *d_input;
float *d_output;
hipMalloc((void **)&d_input, input_bytes);
hipMalloc((void **)&d_output, output_bytes);
hipMemcpy(d_input, h_input, input_bytes, hipMemcpyHostToDevice);
// invoke Kernel
int bx = 32;
int by = 32;
dim3 block(bx, by); // you will want to configure this
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y,
3);
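  // grid.z spans the 3 output feature maps; each block computes one block-sized
  // tile of one feature map (k = blockIdx.z inside the kernel).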
printf("Grid : {%d, %d, %d} blocks. Blocks : {%d, %d} threads.\n", grid.x,
grid.y, grid.z, block.x, block.y);
//==================================
// Define Kernel data
//==================================
// Mystery kernel
const float kernel_template[3][3] = {{1, 1, 1}, {1, -8, 1}, {1, 1, 1}};
float *d_kernel;
float h_kernel[3][3][3][3];
int kernel_bytes = 3 * 3 * 3 * 3 * sizeof(float);
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
hipMalloc((void **)&d_kernel, kernel_bytes);
hipMemcpy(d_kernel, h_kernel, kernel_bytes, hipMemcpyHostToDevice);
hipMemcpyToSymbol(ckernel, &h_kernel, kernel_bytes);
int k_size = 3;
int k_width = (k_size - 1) / 2;
int smem_size =
(bx + 2 * k_width) * (by + 2 * k_width) * channels * sizeof(float);
printf("SMEM size is %d \n", (bx + 2 * k_width) * (by + 2 * k_width));
//==================================
  // GPU Convolution
//==================================
printf("Start conv\n");
double timeStampA = getTimeStamp();
hipLaunchKernelGGL(( conv_cuda), dim3(grid), dim3(block), smem_size, 0, d_input, d_output, width, height,
d_kernel, 3, k_width, kernels);
hipDeviceSynchronize();
double timeStampB = getTimeStamp();
  hipMemcpy(h_output, d_output, output_bytes, hipMemcpyDeviceToHost);
//==================================
// Collect data
//==================================
// Print result
std::cout << "Total convolution time: " << timeStampB - timeStampA
<< std::endl;
std::cout << "Save Output to " << outputfile << std::endl;
save_image(outputfile, h_output, height, width);
hipFree(d_input);
hipFree(d_output);
hipFree(d_kernel);
hipDeviceReset();
  free(h_output);  // h_output was allocated with malloc
return 0;
} | c65426da68a2835ed747b1a3c7945ce95cadd8a9.cu | #include "helpers.h"
__constant__ float ckernel[81];
__global__ void conv_cuda(float *input, float *output, int width, int height,
float *kernel, int channels, int k_width,
int kernels) {
int k = blockIdx.z;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
int output_idx = i * width * kernels + j * kernels + k;
extern __shared__ float sdata[];
int smem_2d_size = (blockDim.x + 2 * k_width) * (blockDim.y + 2 * k_width);
if (threadIdx.y < k_width) {
// Top Left
if (threadIdx.x < k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
// Top Right
int smem_x1 = smem_x + blockDim.x + k_width;
int smem_y1 = smem_y;
int gmem_x1 = gmem_x + blockDim.x + k_width;
int gmem_y1 = gmem_y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] = (gmem_x < 0 || gmem_y < 0) ? 0 : input[gmem_index];
// Top right
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
sdata[smem_index1] =
(gmem_x1 >= width || gmem_y1 < 0) ? 0 : input[gmem_index1];
}
}
// Top Overhang
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y - k_width;
// Left Overhang
int smem_x1 = threadIdx.y;
int smem_y1 = threadIdx.x + k_width;
int gmem_x1 = blockIdx.x * blockDim.x + threadIdx.y - k_width;
int gmem_y1 = blockIdx.y * blockDim.y + threadIdx.x;
// // Right Overhang
// int smem_x2 = blockDim.x + k_width + smem_x1;
// int smem_y2 = smem_y1;
// int gmem_x2 = gmem_x1 + blockDim.x + k_width ;
// int gmem_y2 = gmem_y1;
// // Bottom Overhang
// int smem_x3 = smem_x;
// int smem_y3 = blockDim.y + k_width + smem_y;
// int gmem_x3 = gmem_x;
// int gmem_y3 = gmem_y + blockDim.y + k_width;
for (int c = 0; c < channels; c++) {
// Assign Top values
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] = (gmem_y < 0) ? 0 : input[gmem_index];
// Assign Left value
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
    sdata[smem_index1] = (gmem_x1 < 0 || gmem_y1 >= height) ? 0 : input[gmem_index1];
// //Assign Right
// int gmem_index2 = gmem_x2 * channels + gmem_y2 * width * channels + c;
// int smem_index2 =
// (smem_y2 * (blockDim.x + 2 * k_width) + smem_x2) + c *
// smem_2d_size;
// sdata[smem_index2] = (gmem_x3 >= width) ? 0 : input[gmem_index2];
// //Assign Bottom
// int gmem_index3 = gmem_x3 * channels + gmem_y3 * width * channels + c;
// int smem_index3 =
// (smem_y3 * (blockDim.x + 2 * k_width) + smem_x3) + c *
// smem_2d_size;
// sdata[smem_index3] = (gmem_y3 >= height) ? 0 : input[gmem_index3];
// //Assign Bottom
}
}
// Bottom Left
if (threadIdx.x < k_width && threadIdx.y >= blockDim.y - k_width) {
int smem_x = threadIdx.x;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x - k_width;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
int smem_x1 = smem_x + blockDim.x + k_width;
int smem_y1 = smem_y;
int gmem_x1 = gmem_x + blockDim.x + k_width;
int gmem_y1 = gmem_y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] =
(gmem_x < 0 || gmem_y >= height) ? 0 : input[gmem_index];
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
sdata[smem_index1] =
(gmem_x1 >= width || gmem_y1 >= height) ? 0 : input[gmem_index1];
}
}
// // Bottom Right
// if (threadIdx.x >= blockDim.x - k_width &&
// threadIdx.y >= blockDim.y - k_width) {
// int smem_x = threadIdx.x + 2 * k_width;
// int smem_y = threadIdx.y + 2 * k_width;
// int gmem_x = blockIdx.x * blockDim.x + threadIdx.x + k_width;
// int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
// for (int c = 0; c < channels; c++) {
// int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
// int smem_index =
// (smem_y * (blockDim.x + 2 * k_width) + smem_x) + c * smem_2d_size;
// sdata[smem_index] =
// (gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
// }
// }
// Bottom
if (threadIdx.y >= blockDim.y - k_width) {
// Indexes for bottom padding
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + 2 * k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y + k_width;
// Indexes for right side
int smem_x1 = threadIdx.y + 2 * k_width;
int smem_y1 = threadIdx.x + k_width;
int gmem_x1 = blockIdx.x * blockDim.x + threadIdx.y + k_width;
int gmem_y1 = blockIdx.y * blockDim.y + threadIdx.x;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] = (gmem_y >= height) ? 0 : input[gmem_index];
int gmem_index1 = gmem_x1 * channels + gmem_y1 * width * channels + c;
int smem_index1 =
(smem_y1 * (blockDim.x + 2 * k_width) + smem_x1) * channels + c;
sdata[smem_index1] = (gmem_x1 >= width) ? 0 : input[gmem_index1];
}
}
// Copy the block data
int smem_x = threadIdx.x + k_width;
int smem_y = threadIdx.y + k_width;
int gmem_x = blockIdx.x * blockDim.x + threadIdx.x;
int gmem_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int c = 0; c < channels; c++) {
int gmem_index = gmem_x * channels + gmem_y * width * channels + c;
int smem_index =
(smem_y * (blockDim.x + 2 * k_width) + smem_x) * channels + c;
sdata[smem_index] =
(gmem_x >= width || gmem_y >= height) ? 0 : input[gmem_index];
}
__syncthreads();
if (i >= height || j >= width) {
return;
}
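  // Accumulate the output pixel for feature map k: for each input channel,
  // sweep the (2*k_width+1)^2 window over the shared tile and weight each
  // sample by the matching filter coefficient.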
float tmp_output = 0;
for (int c = 0; c < channels; c++) {
for (int k_i = 0; k_i <= 2 * k_width; k_i++) {
for (int k_j = 0; k_j <= 2 * k_width; k_j++) {
smem_x = threadIdx.x + k_j;
smem_y = threadIdx.y + k_i;
int smem_index = c + smem_x * channels +
smem_y * channels * (blockDim.x + 2 * k_width);
int kernel_index =
k * channels * (2 * k_width + 1) * (2 * k_width + 1) +
c * (2 * k_width + 1) * (2 * k_width + 1) +
k_i * (2 * k_width + 1) + k_j;
tmp_output += sdata[smem_index] * kernel[kernel_index];
}
}
}
output[output_idx] = tmp_output;
return;
}
int main(int argc, char *argv[]) {
char *outputfile = (char *)"cuda_out_reorder.png";
// Check input image name
if (argc < 2) {
std::cout << "No file input" << std::endl;
return 0;
}
//
// Check if the filename is valid
char *filename = argv[1];
std::cout << argv[1] << std::endl;
// Load Image
cv::Mat image;
image = load_image(filename);
if (image.empty()) {
std::cout << "File not exist" << std::endl;
return 0;
}
//==================================
// Define I/O sizes
//==================================
int padding = 1;
int channels = 3;
int height = image.rows;
int width = image.cols;
int kernels = 3;
std::cout << "Image dims (HxW)is " << height << "x" << width << std::endl;
int height_padded = height + 2 * padding;
int width_padded = width + 2 * padding;
int input_bytes = channels * height * width * sizeof(float);
int output_bytes = channels * height * width * sizeof(float);
std::cout << "Padded dims is " << height_padded << "x" << width_padded
<< std::endl;
float *h_input = (float *)image.data;
// float *h_output = new float[output_bytes];
float *h_output;
h_output = (float *)malloc(output_bytes);
float *d_input;
float *d_output;
cudaMalloc((void **)&d_input, input_bytes);
cudaMalloc((void **)&d_output, output_bytes);
cudaMemcpy(d_input, h_input, input_bytes, cudaMemcpyHostToDevice);
// invoke Kernel
int bx = 32;
int by = 32;
dim3 block(bx, by); // you will want to configure this
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y,
3);
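  // grid.z spans the 3 output feature maps; each block computes one block-sized
  // tile of one feature map (k = blockIdx.z inside the kernel).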
printf("Grid : {%d, %d, %d} blocks. Blocks : {%d, %d} threads.\n", grid.x,
grid.y, grid.z, block.x, block.y);
//==================================
// Define Kernel data
//==================================
// Mystery kernel
const float kernel_template[3][3] = {{1, 1, 1}, {1, -8, 1}, {1, 1, 1}};
float *d_kernel;
float h_kernel[3][3][3][3];
int kernel_bytes = 3 * 3 * 3 * 3 * sizeof(float);
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
cudaMalloc((void **)&d_kernel, kernel_bytes);
cudaMemcpy(d_kernel, h_kernel, kernel_bytes, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(ckernel, &h_kernel, kernel_bytes);
int k_size = 3;
int k_width = (k_size - 1) / 2;
int smem_size =
(bx + 2 * k_width) * (by + 2 * k_width) * channels * sizeof(float);
printf("SMEM size is %d \n", (bx + 2 * k_width) * (by + 2 * k_width));
//==================================
  // GPU Convolution
//==================================
printf("Start conv\n");
double timeStampA = getTimeStamp();
conv_cuda<<<grid, block, smem_size>>>(d_input, d_output, width, height,
d_kernel, 3, k_width, kernels);
cudaDeviceSynchronize();
double timeStampB = getTimeStamp();
  cudaMemcpy(h_output, d_output, output_bytes, cudaMemcpyDeviceToHost);
//==================================
// Collect data
//==================================
// Print result
std::cout << "Total convolution time: " << timeStampB - timeStampA
<< std::endl;
std::cout << "Save Output to " << outputfile << std::endl;
save_image(outputfile, h_output, height, width);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_kernel);
cudaDeviceReset();
  free(h_output);  // h_output was allocated with malloc
return 0;
} |
b282999be6d971fb446383296064be25124d58c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_u.cuh"
__device__ __host__
int updatestate (struct params *p, struct state *s, real *w ,int *ii, int field) {
int status=0;
// atomicExch(&(p->cmax),(wd[fencode3_pre(p,ii,soundspeed)]));
switch(field)
{
case rho:
s->rho=s->rho+(w[fencode3_u(p,ii,field)]);
break;
case mom1:
s->m1=s->m1+(w[fencode3_u(p,ii,field)]);
break;
case mom2:
s->m2=s->m2+(w[fencode3_u(p,ii,field)]);
break;
/*case mom3:
s->m3=s->m3+(w[fencode3_u(p,ii,field)]);
break;*/
case energy:
s->e=s->e+(w[fencode3_u(p,ii,field)]);
break;
case b1:
s->b1=s->b1+(w[fencode3_u(p,ii,field)]);
break;
case b2:
s->b2=s->b2+(w[fencode3_u(p,ii,field)]);
break;
/*case b3:
s->b3=s->b3+(w[fencode3_u(p,ii,field)]);
break;*/
};
return status;
}
__global__ void update_parallel(struct params *p, struct state *s, real *w, real *wmod)
{
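  // Each thread copies one cell's conserved variables (rho .. b2, or .. b3 in 3D)
  // from the working array wmod back into w.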
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,f;
int index,k;
__shared__ int ntot;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
//real g=p->g;
real *u, *v, *h;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ip,jp,ipg,jpg;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
//int shift=order*NVAR*dimp;
h=w+dimp*rho;
u=w+dimp*mom1;
v=w+dimp*mom2;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
for( f=rho; f<=b3; f++)
#else
for( f=rho; f<=b2; f++)
#endif
{
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
{
w[fencode3_u(p,iia,f)]=wmod[fencode3_u(p,iia,f)];
}
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_u(char *label)
{
// we need to synchronise first to catch errors due to
 // asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuupdate(struct params **p, real **w, real **wmod,real **wtemp2, struct state **state,struct params **d_p, real **d_w, real **d_wmod, real ** d_wtemp2, struct state **d_state, int step)
//int cuupdate(struct params **p, real **w, real **wmod, real **wd, real **temp2, struct state **state,
// struct params **d_p, real **d_w, real **d_wmod, real **d_wtemp2, struct state **d_state, int step)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyHostToDevice);
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( update_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_state,*d_w,*d_wmod);
//printf("called update\n");
// hipDeviceSynchronize();
//following comments removed from if def pragmas if
//using MPI and copying all cell data to host (how slow!?)
//#ifdef USE_MPI
//#else
if((step%((*p)->cfgsavefrequency))==0)
//#endif
{
//following comments removed from section if
//using MPI and copying all cell data to host (how slow!?)
/*#ifdef USE_MPI
hipMemcpy(*wmod, *d_w, NVAR*dimp*sizeof(real), hipMemcpyDeviceToHost);
#ifdef USE_SAC_3D
hipMemcpy(*wtemp2, *d_wtemp2,NTEMP2*(((*p)->n[0])+2)* (((*p)->n[1])+2)* (((*p)->n[2])+2)*sizeof(real), hipMemcpyDeviceToHost);
#else
hipMemcpy(*wtemp2, *d_wtemp2,NTEMP2*(((*p)->n[0])+2)* (((*p)->n[1])+2)*sizeof(real), hipMemcpyDeviceToHost);
#endif
#endif */
#ifdef USE_GPUD
#ifdef USE_SAC_3D
int ndimp=((*p)->n[0])*((*p)->n[1])*((*p)->n[2]);
#else
int ndimp= ((*p)->n[0])*((*p)->n[1]);
#endif
real *wt=(real *)calloc(ndimp*NVAR,sizeof(real));
int shift,oshift;
int ok1,oj1,oi1;
int oni,onj,onk;
int i1,j1,k1;
int ni,nj,nk;
real *wa=*w;
oni=((*p)->n[0])*((*p)->pnpe[0]);
onj=((*p)->n[1])*((*p)->pnpe[1]);
ni=((*p)->n[0]);
nj=((*p)->n[1]);
#ifdef USE_SAC_3D
onk=((*p)->n[2])*((*p)->pnpe[2]);
nk=((*p)->n[2]);
#endif
hipMemcpy(wt, *d_w, NVAR*ndimp*sizeof(real), hipMemcpyDeviceToHost);
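   // Scatter this process's local subdomain (wt) into the global host array wa,
   // using the process coordinates (pipe) within the processor grid (pnpe) as offsets.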
for(int ivar=0; ivar<NVAR; ivar++)
{
#ifdef USE_SAC_3D
for(k1=0; k1<nk; k1++)
#endif
for(j1=0; j1<nj; j1++)
for(i1=0; i1<ni; i1++)
{
oi1=i1+((*p)->pipe[0]*ni);
oj1=j1+((*p)->pipe[1]*nj);
#ifdef USE_SAC_3D
shift=(k1*ni*nj+j1*ni+i1);
ok1=k1+((*p)->pipe[2]*nk);
oshift=(ok1*oni*onj+oj1*oni+oi1);
#else
shift=(j1*ni+i1);
oshift=(oj1*oni+oi1);
#endif
//if(i1==0 && j1==0)
//if(ivar==0 && ((*p)->ipe)==0 && step==5)
// printf("called update %d %d %d %lg %lg\n",ivar,shift,oshift+oni*onj*ivar,wa[oshift+oni*onj*ivar],wt[shift+ivar*ndimp]);//, wa[oshift+oni*onj*ivar]);//,wt[shift]);
wa[oshift+oni*onj*ivar]=wt[shift+ivar*ndimp];
}
}
printf("here1\n");
free(wt);
// free(wdt);
#else
hipMemcpy(*w, *d_w, NVAR*dimp*sizeof(real), hipMemcpyDeviceToHost);
#endif
// hipMemcpy(*w, *d_w, NVAR*dimp*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wd, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
hipMemcpy(*state, *d_state, sizeof(struct state), hipMemcpyDeviceToHost);
}
//hipMemcpy(*wnew, *d_wnew, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_u, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
int cuupdatehostwd(struct params **p, real **wd, real **wmod,real **wtemp2, struct state **state,struct params **d_p, real **d_wd, real **d_wmod, real ** d_wtemp2, struct state **d_state, int step)
//int cuupdate(struct params **p, real **w, real **wmod, real **wd, real **temp2, struct state **state,
// struct params **d_p, real **d_w, real **d_wmod, real **d_wtemp2, struct state **d_state, int step)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyHostToDevice);
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_state,*d_w,*d_wmod);
//printf("called update\n");
// hipDeviceSynchronize();
#ifdef USE_GPUD
#ifdef USE_SAC_3D
int ndimp=((*p)->n[0])*((*p)->n[1])*((*p)->n[2]);
#else
int ndimp= ((*p)->n[0])*((*p)->n[1]);
#endif
real *wt=(real *)calloc(ndimp*NDERV,sizeof(real));
int shift,oshift;
int ok1,oj1,oi1;
int oni,onj,onk;
int i1,j1,k1;
int ni,nj,nk;
real *wa=*wd;
oni=((*p)->n[0])*((*p)->pnpe[0]);
onj=((*p)->n[1])*((*p)->pnpe[1]);
ni=((*p)->n[0]);
nj=((*p)->n[1]);
#ifdef USE_SAC_3D
onk=((*p)->n[2])*((*p)->pnpe[2]);
nk=((*p)->n[2]);
#endif
hipMemcpy(wt, *d_wd, NDERV*ndimp*sizeof(real), hipMemcpyDeviceToHost);
for(int ivar=0; ivar<NDERV; ivar++)
{
#ifdef USE_SAC_3D
for(k1=0; k1<nk; k1++)
#endif
for(j1=0; j1<nj; j1++)
for(i1=0; i1<ni; i1++)
{
oi1=i1+((*p)->pipe[0]*ni);
oj1=j1+((*p)->pipe[1]*nj);
#ifdef USE_SAC_3D
shift=(k1*ni*nj+j1*ni+i1);
ok1=k1+((*p)->pipe[2]*nk);
oshift=(ok1*oni*onj+oj1*oni+oi1);
#else
shift=(j1*ni+i1);
oshift=(oj1*oni+oi1);
#endif
//if(i1==0 && j1==0)
//if(ivar==0 && ((*p)->ipe)==0 && step==5)
// printf("called update %d %d %d %lg %lg\n",ivar,shift,oshift+oni*onj*ivar,wa[oshift+oni*onj*ivar],wt[shift+ivar*ndimp]);//, wa[oshift+oni*onj*ivar]);//,wt[shift]);
wa[oshift+oni*onj*ivar]=wt[shift+ivar*ndimp];
}
}
printf("here1\n");
free(wt);
// free(wdt);
#else
hipMemcpy(*wd, *d_wd, NDERV*dimp*sizeof(real), hipMemcpyDeviceToHost);
#endif
//checkErrors("copy data from device");
}
int cuupdatedevicewd(struct params **p, real **wd, real **wmod,real **wtemp2, struct state **state,struct params **d_p, real **d_wd, real **d_wmod, real ** d_wtemp2, struct state **d_state, int step)
//int cuupdate(struct params **p, real **w, real **wmod, real **wd, real **temp2, struct state **state,
// struct params **d_p, real **d_w, real **d_wmod, real **d_wtemp2, struct state **d_state, int step)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyHostToDevice);
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_state,*d_w,*d_wmod);
//printf("called update\n");
// hipDeviceSynchronize();
#ifdef USE_GPUD
#ifdef USE_SAC_3D
int ndimp=((*p)->n[0])*((*p)->n[1])*((*p)->n[2]);
#else
int ndimp= ((*p)->n[0])*((*p)->n[1]);
#endif
real *wt=(real *)calloc(ndimp*NDERV,sizeof(real));
int shift,oshift;
int ok1,oj1,oi1;
int oni,onj,onk;
int i1,j1,k1;
int ni,nj,nk;
real *wa=*wd;
oni=((*p)->n[0])*((*p)->pnpe[0]);
onj=((*p)->n[1])*((*p)->pnpe[1]);
ni=((*p)->n[0]);
nj=((*p)->n[1]);
#ifdef USE_SAC_3D
onk=((*p)->n[2])*((*p)->pnpe[2]);
nk=((*p)->n[2]);
#endif
for(int ivar=0; ivar<NDERV; ivar++)
{
#ifdef USE_SAC_3D
for(k1=0; k1<nk; k1++)
#endif
for(j1=0; j1<nj; j1++)
for(i1=0; i1<ni; i1++)
{
oi1=i1+((*p)->pipe[0]*ni);
oj1=j1+((*p)->pipe[1]*nj);
#ifdef USE_SAC_3D
shift=(k1*ni*nj+j1*ni+i1);
ok1=k1+((*p)->pipe[2]*nk);
oshift=(ok1*oni*onj+oj1*oni+oi1);
#else
shift=(j1*ni+i1);
oshift=(oj1*oni+oi1);
#endif
//if(i1==0 && j1==0)
//if(ivar==0 && ((*p)->ipe)==0 && step==5)
// printf("called update %d %d %d %lg %lg\n",ivar,shift,oshift+oni*onj*ivar,wa[oshift+oni*onj*ivar],wt[shift+ivar*ndimp]);//, wa[oshift+oni*onj*ivar]);//,wt[shift]);
             wt[shift+ivar*ndimp]=wa[oshift+oni*onj*ivar];
   }
 }
    // gather complete: upload the local buffer to the device
    hipMemcpy(*d_wd, wt, NDERV*ndimp*sizeof(real), hipMemcpyHostToDevice);
    printf("here1\n");
free(wt);
// free(wdt);
#else
hipMemcpy(*d_wd, *wd, NDERV*dimp*sizeof(real), hipMemcpyHostToDevice);
#endif
//checkErrors("copy data from device");
}
int cufinish(struct params **p, real **w, real **wnew, struct state **state, struct params **d_p,struct bparams **d_bp, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, struct state **d_state, real **d_wtemp, real **d_wtemp1, real **d_wtemp2)
{
//hipMemcpy(*w, *d_w, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_u, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
checkErrors_u("copy data from device");
hipFree(*d_p);
hipFree(*d_bp);
// hipFree(*d_state);
hipFree(*d_w);
hipFree(*d_wnew);
// hipFree(*d_u);
hipFree(*d_wmod);
hipFree(*d_dwn1);
hipFree(*d_wd);
hipFree(*d_wtemp);
hipFree(*d_wtemp1);
hipFree(*d_wtemp2);
}
#ifdef USE_MPI
int cufinishmgpu(struct params **p,real **w, real **wmod, real **temp2, real **gmpivisc0, real **gmpivisc1, real **gmpivisc2, real **gmpiw0, real **gmpiwmod0, real **gmpiw1, real **gmpiwmod1, real **gmpiw2, real **gmpiwmod2, struct params **d_p, real **d_w, real **d_wmod,real **d_wtemp2, real **d_gmpivisc0, real **d_gmpivisc1, real **d_gmpivisc2, real **d_gmpiw0, real **d_gmpiwmod0, real **d_gmpiw1, real **d_gmpiwmod1, real **d_gmpiw2, real **d_gmpiwmod2)
{
//hipMemcpy(*w, *d_w, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_u, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors_u("copy data from device");
hipFree(*d_gmpiw0);
hipFree(*d_gmpiwmod0);
hipFree(*d_gmpiw1);
hipFree(*d_gmpiwmod1);
#ifdef USE_SAC_3D
hipFree(*d_gmpiw2);
hipFree(*d_gmpiwmod2);
hipFree(*d_gmpivisc2);
#endif
hipFree(*d_gmpivisc0);
hipFree(*d_gmpivisc1);
//free(*gmpiw0);
//free(*gmpiwmod0);
// free(*gmpiw1);
// free(*gmpiwmod1);
#ifdef USE_SAC_3D
free(*gmpiw2);
free(*gmpiwmod2);
free(*gmpivisc2);
#endif
free(*gmpivisc0);
free(*gmpivisc1);
//free(*temp2);
}
#endif
| b282999be6d971fb446383296064be25124d58c3.cu | #include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_u.cuh"
__device__ __host__
int updatestate (struct params *p, struct state *s, real *w ,int *ii, int field) {
int status=0;
// atomicExch(&(p->cmax),(wd[fencode3_pre(p,ii,soundspeed)]));
switch(field)
{
case rho:
s->rho=s->rho+(w[fencode3_u(p,ii,field)]);
break;
case mom1:
s->m1=s->m1+(w[fencode3_u(p,ii,field)]);
break;
case mom2:
s->m2=s->m2+(w[fencode3_u(p,ii,field)]);
break;
/*case mom3:
s->m3=s->m3+(w[fencode3_u(p,ii,field)]);
break;*/
case energy:
s->e=s->e+(w[fencode3_u(p,ii,field)]);
break;
case b1:
s->b1=s->b1+(w[fencode3_u(p,ii,field)]);
break;
case b2:
s->b2=s->b2+(w[fencode3_u(p,ii,field)]);
break;
/*case b3:
s->b3=s->b3+(w[fencode3_u(p,ii,field)]);
break;*/
};
return status;
}
__global__ void update_parallel(struct params *p, struct state *s, real *w, real *wmod)
{
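  // Each thread copies one cell's conserved variables (rho .. b2, or .. b3 in 3D)
  // from the working array wmod back into w.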
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,f;
int index,k;
__shared__ int ntot;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
//real g=p->g;
real *u, *v, *h;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ip,jp,ipg,jpg;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
//int shift=order*NVAR*dimp;
h=w+dimp*rho;
u=w+dimp*mom1;
v=w+dimp*mom2;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
for( f=rho; f<=b3; f++)
#else
for( f=rho; f<=b2; f++)
#endif
{
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
{
w[fencode3_u(p,iia,f)]=wmod[fencode3_u(p,iia,f)];
}
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_u(char *label)
{
// we need to synchronise first to catch errors due to
 // asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuupdate(struct params **p, real **w, real **wmod,real **wtemp2, struct state **state,struct params **d_p, real **d_w, real **d_wmod, real ** d_wtemp2, struct state **d_state, int step)
//int cuupdate(struct params **p, real **w, real **wmod, real **wd, real **temp2, struct state **state,
// struct params **d_p, real **d_w, real **d_wmod, real **d_wtemp2, struct state **d_state, int step)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyHostToDevice);
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_state,*d_w,*d_wmod);
//printf("called update\n");
// cudaThreadSynchronize();
//following comments removed from if def pragmas if
//using MPI and copying all cell data to host (how slow!?)
//#ifdef USE_MPI
//#else
if((step%((*p)->cfgsavefrequency))==0)
//#endif
{
//following comments removed from section if
//using MPI and copying all cell data to host (how slow!?)
/*#ifdef USE_MPI
cudaMemcpy(*wmod, *d_w, NVAR*dimp*sizeof(real), cudaMemcpyDeviceToHost);
#ifdef USE_SAC_3D
cudaMemcpy(*wtemp2, *d_wtemp2,NTEMP2*(((*p)->n[0])+2)* (((*p)->n[1])+2)* (((*p)->n[2])+2)*sizeof(real), cudaMemcpyDeviceToHost);
#else
cudaMemcpy(*wtemp2, *d_wtemp2,NTEMP2*(((*p)->n[0])+2)* (((*p)->n[1])+2)*sizeof(real), cudaMemcpyDeviceToHost);
#endif
#endif */
#ifdef USE_GPUD
#ifdef USE_SAC_3D
int ndimp=((*p)->n[0])*((*p)->n[1])*((*p)->n[2]);
#else
int ndimp= ((*p)->n[0])*((*p)->n[1]);
#endif
real *wt=(real *)calloc(ndimp*NVAR,sizeof(real));
int shift,oshift;
int ok1,oj1,oi1;
int oni,onj,onk;
int i1,j1,k1;
int ni,nj,nk;
real *wa=*w;
oni=((*p)->n[0])*((*p)->pnpe[0]);
onj=((*p)->n[1])*((*p)->pnpe[1]);
ni=((*p)->n[0]);
nj=((*p)->n[1]);
#ifdef USE_SAC_3D
onk=((*p)->n[2])*((*p)->pnpe[2]);
nk=((*p)->n[2]);
#endif
cudaMemcpy(wt, *d_w, NVAR*ndimp*sizeof(real), cudaMemcpyDeviceToHost);
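   // Scatter this process's local subdomain (wt) into the global host array wa,
   // using the process coordinates (pipe) within the processor grid (pnpe) as offsets.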
for(int ivar=0; ivar<NVAR; ivar++)
{
#ifdef USE_SAC_3D
for(k1=0; k1<nk; k1++)
#endif
for(j1=0; j1<nj; j1++)
for(i1=0; i1<ni; i1++)
{
oi1=i1+((*p)->pipe[0]*ni);
oj1=j1+((*p)->pipe[1]*nj);
#ifdef USE_SAC_3D
shift=(k1*ni*nj+j1*ni+i1);
ok1=k1+((*p)->pipe[2]*nk);
oshift=(ok1*oni*onj+oj1*oni+oi1);
#else
shift=(j1*ni+i1);
oshift=(oj1*oni+oi1);
#endif
//if(i1==0 && j1==0)
//if(ivar==0 && ((*p)->ipe)==0 && step==5)
// printf("called update %d %d %d %lg %lg\n",ivar,shift,oshift+oni*onj*ivar,wa[oshift+oni*onj*ivar],wt[shift+ivar*ndimp]);//, wa[oshift+oni*onj*ivar]);//,wt[shift]);
wa[oshift+oni*onj*ivar]=wt[shift+ivar*ndimp];
}
}
printf("here1\n");
free(wt);
// free(wdt);
#else
cudaMemcpy(*w, *d_w, NVAR*dimp*sizeof(real), cudaMemcpyDeviceToHost);
#endif
// cudaMemcpy(*w, *d_w, NVAR*dimp*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wd, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
cudaMemcpy(*state, *d_state, sizeof(struct state), cudaMemcpyDeviceToHost);
}
//cudaMemcpy(*wnew, *d_wnew, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_u, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
int cuupdatehostwd(struct params **p, real **wd, real **wmod,real **wtemp2, struct state **state,struct params **d_p, real **d_wd, real **d_wmod, real ** d_wtemp2, struct state **d_state, int step)
//int cuupdate(struct params **p, real **w, real **wmod, real **wd, real **temp2, struct state **state,
// struct params **d_p, real **d_w, real **d_wmod, real **d_wtemp2, struct state **d_state, int step)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyHostToDevice);
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_state,*d_w,*d_wmod);
//printf("called update\n");
// cudaThreadSynchronize();
#ifdef USE_GPUD
#ifdef USE_SAC_3D
int ndimp=((*p)->n[0])*((*p)->n[1])*((*p)->n[2]);
#else
int ndimp= ((*p)->n[0])*((*p)->n[1]);
#endif
real *wt=(real *)calloc(ndimp*NDERV,sizeof(real));
int shift,oshift;
int ok1,oj1,oi1;
int oni,onj,onk;
int i1,j1,k1;
int ni,nj,nk;
real *wa=*wd;
oni=((*p)->n[0])*((*p)->pnpe[0]);
onj=((*p)->n[1])*((*p)->pnpe[1]);
ni=((*p)->n[0]);
nj=((*p)->n[1]);
#ifdef USE_SAC_3D
onk=((*p)->n[2])*((*p)->pnpe[2]);
nk=((*p)->n[2]);
#endif
cudaMemcpy(wt, *d_wd, NDERV*ndimp*sizeof(real), cudaMemcpyDeviceToHost);
for(int ivar=0; ivar<NDERV; ivar++)
{
#ifdef USE_SAC_3D
for(k1=0; k1<nk; k1++)
#endif
for(j1=0; j1<nj; j1++)
for(i1=0; i1<ni; i1++)
{
oi1=i1+((*p)->pipe[0]*ni);
oj1=j1+((*p)->pipe[1]*nj);
#ifdef USE_SAC_3D
shift=(k1*ni*nj+j1*ni+i1);
ok1=k1+((*p)->pipe[2]*nk);
oshift=(ok1*oni*onj+oj1*oni+oi1);
#else
shift=(j1*ni+i1);
oshift=(oj1*oni+oi1);
#endif
//if(i1==0 && j1==0)
//if(ivar==0 && ((*p)->ipe)==0 && step==5)
// printf("called update %d %d %d %lg %lg\n",ivar,shift,oshift+oni*onj*ivar,wa[oshift+oni*onj*ivar],wt[shift+ivar*ndimp]);//, wa[oshift+oni*onj*ivar]);//,wt[shift]);
wa[oshift+oni*onj*ivar]=wt[shift+ivar*ndimp];
}
}
printf("here1\n");
free(wt);
// free(wdt);
#else
cudaMemcpy(*wd, *d_wd, NDERV*dimp*sizeof(real), cudaMemcpyDeviceToHost);
#endif
//checkErrors("copy data from device");
}
int cuupdatedevicewd(struct params **p, real **wd, real **wmod,real **wtemp2, struct state **state,struct params **d_p, real **d_wd, real **d_wmod, real ** d_wtemp2, struct state **d_state, int step)
//int cuupdate(struct params **p, real **w, real **wmod, real **wd, real **temp2, struct state **state,
// struct params **d_p, real **d_w, real **d_wmod, real **d_wtemp2, struct state **d_state, int step)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
dim3 dimBlock(dimblock, 1);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyHostToDevice);
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_state,*d_w,*d_wmod);
//printf("called update\n");
// cudaThreadSynchronize();
#ifdef USE_GPUD
#ifdef USE_SAC_3D
int ndimp=((*p)->n[0])*((*p)->n[1])*((*p)->n[2]);
#else
int ndimp= ((*p)->n[0])*((*p)->n[1]);
#endif
real *wt=(real *)calloc(ndimp*NDERV,sizeof(real));
int shift,oshift;
int ok1,oj1,oi1;
int oni,onj,onk;
int i1,j1,k1;
int ni,nj,nk;
real *wa=*wd;
oni=((*p)->n[0])*((*p)->pnpe[0]);
onj=((*p)->n[1])*((*p)->pnpe[1]);
ni=((*p)->n[0]);
nj=((*p)->n[1]);
#ifdef USE_SAC_3D
onk=((*p)->n[2])*((*p)->pnpe[2]);
nk=((*p)->n[2]);
#endif
for(int ivar=0; ivar<NDERV; ivar++)
{
#ifdef USE_SAC_3D
for(k1=0; k1<nk; k1++)
#endif
for(j1=0; j1<nj; j1++)
for(i1=0; i1<ni; i1++)
{
oi1=i1+((*p)->pipe[0]*ni);
oj1=j1+((*p)->pipe[1]*nj);
#ifdef USE_SAC_3D
shift=(k1*ni*nj+j1*ni+i1);
ok1=k1+((*p)->pipe[2]*nk);
oshift=(ok1*oni*onj+oj1*oni+oi1);
#else
shift=(j1*ni+i1);
oshift=(oj1*oni+oi1);
#endif
//if(i1==0 && j1==0)
//if(ivar==0 && ((*p)->ipe)==0 && step==5)
// printf("called update %d %d %d %lg %lg\n",ivar,shift,oshift+oni*onj*ivar,wa[oshift+oni*onj*ivar],wt[shift+ivar*ndimp]);//, wa[oshift+oni*onj*ivar]);//,wt[shift]);
             wt[shift+ivar*ndimp]=wa[oshift+oni*onj*ivar];
   }
 }
    // gather complete: upload the local buffer to the device
    cudaMemcpy(*d_wd, wt, NDERV*ndimp*sizeof(real), cudaMemcpyHostToDevice);
    printf("here1\n");
free(wt);
// free(wdt);
#else
cudaMemcpy(*d_wd, *wd, NDERV*dimp*sizeof(real), cudaMemcpyHostToDevice);
#endif
//checkErrors("copy data from device");
}
int cufinish(struct params **p, real **w, real **wnew, struct state **state, struct params **d_p,struct bparams **d_bp, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, struct state **d_state, real **d_wtemp, real **d_wtemp1, real **d_wtemp2)
{
//cudaMemcpy(*w, *d_w, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_u, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
checkErrors_u("copy data from device");
cudaFree(*d_p);
cudaFree(*d_bp);
// cudaFree(*d_state);
cudaFree(*d_w);
cudaFree(*d_wnew);
// cudaFree(*d_u);
cudaFree(*d_wmod);
cudaFree(*d_dwn1);
cudaFree(*d_wd);
cudaFree(*d_wtemp);
cudaFree(*d_wtemp1);
cudaFree(*d_wtemp2);
}
#ifdef USE_MPI
int cufinishmgpu(struct params **p,real **w, real **wmod, real **temp2, real **gmpivisc0, real **gmpivisc1, real **gmpivisc2, real **gmpiw0, real **gmpiwmod0, real **gmpiw1, real **gmpiwmod1, real **gmpiw2, real **gmpiwmod2, struct params **d_p, real **d_w, real **d_wmod,real **d_wtemp2, real **d_gmpivisc0, real **d_gmpivisc1, real **d_gmpivisc2, real **d_gmpiw0, real **d_gmpiwmod0, real **d_gmpiw1, real **d_gmpiwmod1, real **d_gmpiw2, real **d_gmpiwmod2)
{
//cudaMemcpy(*w, *d_w, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, 8*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_u, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors_u("copy data from device");
cudaFree(*d_gmpiw0);
cudaFree(*d_gmpiwmod0);
cudaFree(*d_gmpiw1);
cudaFree(*d_gmpiwmod1);
#ifdef USE_SAC_3D
cudaFree(*d_gmpiw2);
cudaFree(*d_gmpiwmod2);
cudaFree(*d_gmpivisc2);
#endif
cudaFree(*d_gmpivisc0);
cudaFree(*d_gmpivisc1);
//free(*gmpiw0);
//free(*gmpiwmod0);
// free(*gmpiw1);
// free(*gmpiwmod1);
#ifdef USE_SAC_3D
free(*gmpiw2);
free(*gmpiwmod2);
free(*gmpivisc2);
#endif
free(*gmpivisc0);
free(*gmpivisc1);
//free(*temp2);
}
#endif
|
821f512edc9f4d408951f0b1e0b0ce35464b211f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define MSIZE 12*8*21*2 //22
#define MSIZE 12*8*21 //22
#define BLOCK_SIZE 256
#define WARP_SIZE 32
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
hipHostMalloc(newA_ptr, paddedSize * sizeof(float));
hipHostMalloc(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel( float* val,
int * cols,
int * rowDelimiters,
float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ volatile float partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = __ldg(&cols[j]);
mySum += val[j] * __ldg(&vec[col]);
}
partialSums[t] = mySum;
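        // Tree-reduce the 32 per-lane partial sums within the warp; correctness
        // relies on implicit warp-synchronous execution over the volatile shared array.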
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
hipSetDevice(1);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * (spmv_numRows / 50); // ~2% of entries will be non-zero
float maxval = 200.0;
hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
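    // Note: the padded copies built above (h_spmv_valPad, h_spmv_colsPad,
    // h_rowDelimitersPad) are not used by the kernel launched below; only the
    // unpadded CSR arrays are copied to the device.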
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
for (int i=0; i<10; i++) // repeat 10 times
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
| 821f512edc9f4d408951f0b1e0b0ce35464b211f.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define MSIZE 12*8*21*2 //22
#define MSIZE 12*8*21 //22
#define BLOCK_SIZE 256
#define WARP_SIZE 32
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
cudaMallocHost(newA_ptr, paddedSize * sizeof(float));
cudaMallocHost(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel( float* val,
int * cols,
int * rowDelimiters,
float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ volatile float partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = __ldg(&cols[j]);
mySum += val[j] * __ldg(&vec[col]);
}
partialSums[t] = mySum;
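        // Tree-reduce the 32 per-lane partial sums within the warp; correctness
        // relies on implicit warp-synchronous execution over the volatile shared array.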
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
cudaSetDevice(1);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * (spmv_numRows / 50); // ~2% of entries will be non-zero
float maxval = 200.0;
cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
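    // Note: the padded copies built above (h_spmv_valPad, h_spmv_colsPad,
    // h_rowDelimitersPad) are not used by the kernel launched below; only the
    // unpadded CSR arrays are copied to the device.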
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
for (int i=0; i<10; i++) // repeat 10 times
spmv_kernel <<<spmv_grid, BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
5b1b6dba8a508947d00f116b7a38dcf7a41c0c20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_unmix16(int32_t * u, int32_t * v, int16_t * out, uint32_t stride, uint32_t * numSamples, int32_t * mixbits, int32_t * mixres, int32_t theOutputPacketBytes, uint32_t frameLength)
{
int block = blockIdx.x % 8;
int index = blockIdx.x / 8;
int z = threadIdx.x + block * blockDim.x;
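 // Eight thread blocks cooperate on each output packet: blockIdx.x / 8 selects
 // the packet and (blockIdx.x % 8, threadIdx.x) select the sample index z.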
if (z < numSamples[index])
{
int32_t l, r;
int16_t * op = out + (index * theOutputPacketBytes) / 2;
if (mixres[index] != 0)
{
/* matrixed stereo */
l = (u + index * frameLength)[z] + (v + index * frameLength)[z] - ((mixres[index] * (v + index * frameLength)[z]) >> mixbits[index]);
r = l - (v + index * frameLength)[z];
op += stride* z;
op[0] = (int16_t)l;
op[1] = (int16_t)r;
}
else
{
/* Conventional separated stereo. */
op += stride * z;
op[0] = (int16_t)(u + index * frameLength)[z];
op[1] = (int16_t)(v + index * frameLength)[z];
}
}
} | 5b1b6dba8a508947d00f116b7a38dcf7a41c0c20.cu | #include "includes.h"
__global__ void gpu_unmix16(int32_t * u, int32_t * v, int16_t * out, uint32_t stride, uint32_t * numSamples, int32_t * mixbits, int32_t * mixres, int32_t theOutputPacketBytes, uint32_t frameLength)
{
int block = blockIdx.x % 8;
int index = blockIdx.x / 8;
int z = threadIdx.x + block * blockDim.x;
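 // Eight thread blocks cooperate on each output packet: blockIdx.x / 8 selects
 // the packet and (blockIdx.x % 8, threadIdx.x) select the sample index z.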
if (z < numSamples[index])
{
int32_t l, r;
int16_t * op = out + (index * theOutputPacketBytes) / 2;
if (mixres[index] != 0)
{
/* matrixed stereo */
l = (u + index * frameLength)[z] + (v + index * frameLength)[z] - ((mixres[index] * (v + index * frameLength)[z]) >> mixbits[index]);
r = l - (v + index * frameLength)[z];
op += stride* z;
op[0] = (int16_t)l;
op[1] = (int16_t)r;
}
else
{
/* Conventional separated stereo. */
op += stride * z;
op[0] = (int16_t)(u + index * frameLength)[z];
op[1] = (int16_t)(v + index * frameLength)[z];
}
}
} |
4d0c7f7c3ff421244906e9e88bd6bcc161e97243.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<hip/hip_runtime.h>
#include<hip/hip_runtime_api.h>
using std::cout;
using std::endl;
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if(err!=hipSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
__global__ void bgr_to_gray_kernel( unsigned char* input,
unsigned char* output,
int width,
int height,
int colorWidthStep,
int grayWidthStep)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex<width) && (yIndex<height))
{
//Location of colored pixel in input
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Location of gray pixel in output
const int gray_tid = yIndex * grayWidthStep + xIndex;
const unsigned char blue = input[color_tid];
const unsigned char green = input[color_tid + 1];
const unsigned char red = input[color_tid + 2];
const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;
output[gray_tid] = static_cast<unsigned char>(gray);
}
}
void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int colorBytes = input.step * input.rows;
const int grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
//Allocate device memory
SAFE_CALL(hipMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
SAFE_CALL(hipMemcpy(d_input,input.ptr(),colorBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(16,16);
//Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
//Launch the color conversion kernel
hipLaunchKernelGGL(( bgr_to_gray_kernel), dim3(grid),dim3(block), 0, 0, d_input,d_output,input.cols,input.rows,input.step,output.step);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
//Copy back data from destination device memory to OpenCV output image
SAFE_CALL(hipMemcpy(output.ptr(),d_output,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
//Free the device memory
SAFE_CALL(hipFree(d_input),"CUDA Free Failed");
SAFE_CALL(hipFree(d_output),"CUDA Free Failed");
}
#define SIGMOID(x) ((x < -8.0) ? -8.0 : ((x > 8.0) ? 8.0 : (1 / (1 + expf(-x)))))
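// Behavior note: SIGMOID evaluates the logistic 1 / (1 + expf(-x)) only for
// x in [-8, 8]; outside that range it returns the clamp bounds -8.0 / 8.0
// themselves rather than a value in (0, 1).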
float sigmoid(float x)
{
return SIGMOID(x);
}
__global__ void ssigmoid( float* input,
float* output,
int width,
int height)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex<width) && (yIndex<height)) {
int offs = (yIndex*width)+xIndex;
output[offs] = SIGMOID(input[offs]);
}
}
void map_sigmoid(const cv::Mat& input, cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int inputBytes = input.step * input.rows;
const int outputBytes = output.step * output.rows;
float *d_input, *d_output;
//Allocate device memory
SAFE_CALL(hipMalloc<float>(&d_input,inputBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_output,outputBytes),"CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
SAFE_CALL(hipMemcpy(d_input,input.ptr(),inputBytes,hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(16,16);
//Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
//Launch the input conversion kernel
hipLaunchKernelGGL(( ssigmoid), dim3(grid),dim3(block), 0, 0, d_input,d_output,input.cols,input.rows);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
//Copy back data from destination device memory to OpenCV output image
SAFE_CALL(hipMemcpy(output.ptr(),d_output,outputBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
//Free the device memory
SAFE_CALL(hipFree(d_input),"CUDA Free Failed");
SAFE_CALL(hipFree(d_output),"CUDA Free Failed");
}
| 4d0c7f7c3ff421244906e9e88bd6bcc161e97243.cu | #include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<cuda_runtime.h>
#include<cuda_runtime_api.h>
using std::cout;
using std::endl;
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if(err!=cudaSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
__global__ void bgr_to_gray_kernel( unsigned char* input,
unsigned char* output,
int width,
int height,
int colorWidthStep,
int grayWidthStep)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex<width) && (yIndex<height))
{
//Location of colored pixel in input
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Location of gray pixel in output
const int gray_tid = yIndex * grayWidthStep + xIndex;
const unsigned char blue = input[color_tid];
const unsigned char green = input[color_tid + 1];
const unsigned char red = input[color_tid + 2];
const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;
output[gray_tid] = static_cast<unsigned char>(gray);
}
}
void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int colorBytes = input.step * input.rows;
const int grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
//Allocate device memory
SAFE_CALL(cudaMalloc<unsigned char>(&d_input,colorBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_output,grayBytes),"CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
SAFE_CALL(cudaMemcpy(d_input,input.ptr(),colorBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(16,16);
//Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
//Launch the color conversion kernel
bgr_to_gray_kernel<<<grid,block>>>(d_input,d_output,input.cols,input.rows,input.step,output.step);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
//Copy back data from destination device memory to OpenCV output image
SAFE_CALL(cudaMemcpy(output.ptr(),d_output,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
//Free the device memory
SAFE_CALL(cudaFree(d_input),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_output),"CUDA Free Failed");
}
#define SIGMOID(x) ((x < -8.0) ? -8.0 : ((x > 8.0) ? 8.0 : (1 / (1 + expf(-x)))))
float sigmoid(float x)
{
return SIGMOID(x);
}
__global__ void ssigmoid( float* input,
float* output,
int width,
int height)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
//Only valid threads perform memory I/O
if((xIndex<width) && (yIndex<height)) {
int offs = (yIndex*width)+xIndex;
output[offs] = SIGMOID(input[offs]);
}
}
void map_sigmoid(const cv::Mat& input, cv::Mat& output)
{
//Calculate total number of bytes of input and output image
const int inputBytes = input.step * input.rows;
const int outputBytes = output.step * output.rows;
float *d_input, *d_output;
//Allocate device memory
SAFE_CALL(cudaMalloc<float>(&d_input,inputBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_output,outputBytes),"CUDA Malloc Failed");
//Copy data from OpenCV input image to device memory
SAFE_CALL(cudaMemcpy(d_input,input.ptr(),inputBytes,cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(16,16);
//Calculate grid size to cover the whole image
const dim3 grid((input.cols + block.x - 1)/block.x, (input.rows + block.y - 1)/block.y);
//Launch the input conversion kernel
ssigmoid<<<grid,block>>>(d_input,d_output,input.cols,input.rows);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
//Copy back data from destination device memory to OpenCV output image
SAFE_CALL(cudaMemcpy(output.ptr(),d_output,outputBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
//Free the device memory
SAFE_CALL(cudaFree(d_input),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_output),"CUDA Free Failed");
}
|
c526e59f14d38517e95155593d34986f9a2f6237.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include "patusrt.h"
// forward_decls -->
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max);
__global__ void gradient(float * * ux_1_0_out, float * * uy_2_0_out, float * * uz_3_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max);
// <--
int main (int argc, char** argv)
{
int i;
hipError_t res;
// prepare grids
// declare_grids -->
float * ux_1_0_out;
float * uy_2_0_out;
float * uz_3_0_out;
float * u_0_0;
float * ux_1_0;
float * uy_2_0;
float * uz_3_0;
if ((argc!=4))
{
printf("Wrong number of parameters. Syntax:\n%s <x_max> <y_max> <z_max>\n", argv[0]);
exit(-1);
}
int x_max = atoi(argv[1]);
int y_max = atoi(argv[2]);
int z_max = atoi(argv[3]);
// <--
// allocate_grids -->
u_0_0=((float * )malloc(((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float))));
ux_1_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
uy_2_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
uz_3_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
// <--
// declare_GPU_grids -->
float * ux_1_0_out_gpu;
float * uy_2_0_out_gpu;
float * uz_3_0_out_gpu;
float * u_0_0_gpu;
float * ux_1_0_gpu;
float * uy_2_0_gpu;
float * uz_3_0_gpu;
dim3 thds(1, 1, 1);
dim3 blks(x_max, (y_max*z_max), 1);
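// Launch-shape note: single-thread blocks, with the grid spanning
// x_max * (y_max * z_max) blocks -- presumably one block per point of the
// x_max x y_max x z_max domain (e.g. 64^3 gives a 64 x 4096 grid of blocks).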
// <--
// allocate_GPU_grids -->
hipMalloc(((void * * )( & u_0_0_gpu)), ((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float)));
hipMalloc(((void * * )( & uz_3_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
hipMalloc(((void * * )( & uz_3_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
hipMalloc(((void * * )( & uy_2_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
hipMalloc(((void * * )( & ux_1_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
hipMalloc(((void * * )( & uy_2_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
hipMalloc(((void * * )( & ux_1_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
// <--
// copy_grids_to_GPU -->
hipMemcpy(((void * )u_0_0_gpu), ((void * )u_0_0), ((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float)), hipMemcpyHostToDevice);
hipMemcpy(((void * )uz_3_0_gpu), ((void * )uz_3_0), (((x_max*y_max)*z_max)*sizeof (float)), hipMemcpyHostToDevice);
hipMemcpy(((void * )uy_2_0_gpu), ((void * )uy_2_0), (((x_max*y_max)*z_max)*sizeof (float)), hipMemcpyHostToDevice);
hipMemcpy(((void * )ux_1_0_gpu), ((void * )ux_1_0), (((x_max*y_max)*z_max)*sizeof (float)), hipMemcpyHostToDevice);
// <--
// initialize_grids -->
hipLaunchKernelGGL(( initialize), dim3(blks), dim3(thds), 0, 0, u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.1, 0.2, 0.30000000000000004, x_max, y_max, z_max);
// <--
hipDeviceSynchronize ();
res = hipGetLastError ();
if (res != hipSuccess)
{
printf ("CUDA Error [Initialization]: %s.\n", hipGetErrorString (res));
}
long nFlopsPerStencil = 6;
long nGridPointsCount = 5 * ((x_max*y_max)*z_max);
long nBytesTransferred = 5 * (((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float))+(((((x_max*y_max)*z_max)*sizeof (float))+(((x_max*y_max)*z_max)*sizeof (float)))+(((x_max*y_max)*z_max)*sizeof (float))));
// warm up
// compute_stencil -->
hipLaunchKernelGGL(( gradient), dim3(blks), dim3(thds), 0, 0, ( & ux_1_0_out_gpu), ( & uy_2_0_out_gpu), ( & uz_3_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.4, 0.5, 0.6, x_max, y_max, z_max);
// <--
hipDeviceSynchronize ();
res = hipGetLastError ();
if (res != hipSuccess)
{
printf ("CUDA Error [Stencil]: %s.\n", hipGetErrorString (res));
}
// run the benchmark
tic ();
for (i = 0; i < 5; i++)
{
// compute_stencil -->
hipLaunchKernelGGL(( gradient), dim3(blks), dim3(thds), 0, 0, ( & ux_1_0_out_gpu), ( & uy_2_0_out_gpu), ( & uz_3_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.7, 0.7999999999999999, 0.8999999999999999, x_max, y_max, z_max);
// <--
hipDeviceSynchronize ();
}
toc (nFlopsPerStencil, nGridPointsCount, nBytesTransferred);
// free memory
// deallocate_grids -->
hipFree(((void * )u_0_0_gpu));
hipFree(((void * )uz_3_0_out_gpu));
hipFree(((void * )uz_3_0_gpu));
hipFree(((void * )uy_2_0_gpu));
hipFree(((void * )ux_1_0_gpu));
hipFree(((void * )uy_2_0_out_gpu));
hipFree(((void * )ux_1_0_out_gpu));
free(u_0_0);
free(ux_1_0);
free(uy_2_0);
free(uz_3_0);
// <--
hipDeviceReset ();
return EXIT_SUCCESS;
}
| c526e59f14d38517e95155593d34986f9a2f6237.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cutil.h>
#include "patusrt.h"
// forward_decls -->
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max);
__global__ void gradient(float * * ux_1_0_out, float * * uy_2_0_out, float * * uz_3_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max);
// <--
int main (int argc, char** argv)
{
int i;
cudaError_t res;
// prepare grids
// declare_grids -->
float * ux_1_0_out;
float * uy_2_0_out;
float * uz_3_0_out;
float * u_0_0;
float * ux_1_0;
float * uy_2_0;
float * uz_3_0;
if ((argc!=4))
{
printf("Wrong number of parameters. Syntax:\n%s <x_max> <y_max> <z_max>\n", argv[0]);
exit(-1);
}
int x_max = atoi(argv[1]);
int y_max = atoi(argv[2]);
int z_max = atoi(argv[3]);
// <--
// allocate_grids -->
u_0_0=((float * )malloc(((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float))));
ux_1_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
uy_2_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
uz_3_0=((float * )malloc((((x_max*y_max)*z_max)*sizeof (float))));
// <--
// declare_GPU_grids -->
float * ux_1_0_out_gpu;
float * uy_2_0_out_gpu;
float * uz_3_0_out_gpu;
float * u_0_0_gpu;
float * ux_1_0_gpu;
float * uy_2_0_gpu;
float * uz_3_0_gpu;
dim3 thds(1, 1, 1);
dim3 blks(x_max, (y_max*z_max), 1);
// <--
// allocate_GPU_grids -->
cudaMalloc(((void * * )( & u_0_0_gpu)), ((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float)));
cudaMalloc(((void * * )( & uz_3_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
cudaMalloc(((void * * )( & uz_3_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
cudaMalloc(((void * * )( & uy_2_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
cudaMalloc(((void * * )( & ux_1_0_gpu)), (((x_max*y_max)*z_max)*sizeof (float)));
cudaMalloc(((void * * )( & uy_2_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
cudaMalloc(((void * * )( & ux_1_0_out_gpu)), (((x_max*y_max)*z_max)*sizeof (float * )));
// <--
// copy_grids_to_GPU -->
cudaMemcpy(((void * )u_0_0_gpu), ((void * )u_0_0), ((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float)), cudaMemcpyHostToDevice);
cudaMemcpy(((void * )uz_3_0_gpu), ((void * )uz_3_0), (((x_max*y_max)*z_max)*sizeof (float)), cudaMemcpyHostToDevice);
cudaMemcpy(((void * )uy_2_0_gpu), ((void * )uy_2_0), (((x_max*y_max)*z_max)*sizeof (float)), cudaMemcpyHostToDevice);
cudaMemcpy(((void * )ux_1_0_gpu), ((void * )ux_1_0), (((x_max*y_max)*z_max)*sizeof (float)), cudaMemcpyHostToDevice);
// <--
// initialize_grids -->
initialize<<<blks, thds>>>(u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.1, 0.2, 0.30000000000000004, x_max, y_max, z_max);
// <--
cudaThreadSynchronize ();
res = cudaGetLastError ();
if (res != cudaSuccess)
{
printf ("CUDA Error [Initialization]: %s.\n", cudaGetErrorString (res));
}
long nFlopsPerStencil = 6;
long nGridPointsCount = 5 * ((x_max*y_max)*z_max);
long nBytesTransferred = 5 * (((((x_max+4)*(y_max+4))*(z_max+4))*sizeof (float))+(((((x_max*y_max)*z_max)*sizeof (float))+(((x_max*y_max)*z_max)*sizeof (float)))+(((x_max*y_max)*z_max)*sizeof (float))));
// warm up
// compute_stencil -->
gradient<<<blks, thds>>>(( & ux_1_0_out_gpu), ( & uy_2_0_out_gpu), ( & uz_3_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.4, 0.5, 0.6, x_max, y_max, z_max);
// <--
cudaThreadSynchronize ();
res = cudaGetLastError ();
if (res != cudaSuccess)
{
printf ("CUDA Error [Stencil]: %s.\n", cudaGetErrorString (res));
}
// run the benchmark
tic ();
for (i = 0; i < 5; i++)
{
// compute_stencil -->
gradient<<<blks, thds>>>(( & ux_1_0_out_gpu), ( & uy_2_0_out_gpu), ( & uz_3_0_out_gpu), u_0_0_gpu, ux_1_0_gpu, uy_2_0_gpu, uz_3_0_gpu, 0.7, 0.7999999999999999, 0.8999999999999999, x_max, y_max, z_max);
// <--
cudaThreadSynchronize ();
}
toc (nFlopsPerStencil, nGridPointsCount, nBytesTransferred);
// free memory
// deallocate_grids -->
cudaFree(((void * )u_0_0_gpu));
cudaFree(((void * )uz_3_0_out_gpu));
cudaFree(((void * )uz_3_0_gpu));
cudaFree(((void * )uy_2_0_gpu));
cudaFree(((void * )ux_1_0_gpu));
cudaFree(((void * )uy_2_0_out_gpu));
cudaFree(((void * )ux_1_0_out_gpu));
free(u_0_0);
free(ux_1_0);
free(uy_2_0);
free(uz_3_0);
// <--
cudaThreadExit ();
return EXIT_SUCCESS;
}
|
67e9e759dc4b46d252f4249e33df431ca342d008.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <random>
// #include "3rdparty/cub-1.8.0/hipcub/hipcub.hpp"
#include "common.h"
#include "gptKernels.h"
#include "transformerKernels.h"
/**
@file
Implements the CUDA kernel functions and their launchers
required by the GPT model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: ker_gpt_embedding
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = token_seq_len
blockDim.x = hidden_size
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, token_seq_len]
output: result, [batch_size, token_seq_len, hidden_size]
real_seq_len: record seq len exclude padding, [batch_size]
padding_id, the padding_id, default 0
pos_offset: get real pos when decoding which gridDim.y=1
*/
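// Illustrative launch (sizes are examples only): with batch_size = 4,
// token_seq_len = 16 and hidden_size = 1024, the fp32 launcher below issues
// ker_gpt_embedding<float><<<dim3(4, 16), 1024>>>(...), and thread (b, s, h)
// writes output[b][s][h] = token_emb[token_id[b][s]][h] + pos_emb[s + pos_offset][h].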
template <typename T>
__global__ void ker_gpt_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
if (tid == padding_id) {
// for padding id
output[target_pos * blockDim.x + threadIdx.x] = 0.f;
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
output[target_pos * blockDim.x + threadIdx.x] =
token_emb[tid * blockDim.x + threadIdx.x] +
pos_emb[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x];
}
/* fp16 version */
template <>
__global__ void ker_gpt_embedding<__half>(const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
half2* output_h = (half2*)output;
if (tid == padding_id) {
// for padding id
output_h[target_pos * blockDim.x + threadIdx.x] = __float2half2_rn(0.f);
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
float2 te =
__half22float2(((const half2*)token_emb)[tid * blockDim.x + threadIdx.x]);
float2 pe = __half22float2(
((const half2*)
pos_emb)[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x]);
te.x += pe.x;
te.y += pe.y;
output_h[target_pos * blockDim.x + threadIdx.x] = __float22half2_rn(te);
}
template <typename T>
void ker_gpt_embedding_launcher(int batch_size, int batch_seq_len,
int hidden_size, hipStream_t stream,
const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
hipLaunchKernelGGL(( ker_gpt_embedding<T>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(hidden_size), 0, stream,
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template <>
void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset) {
hipLaunchKernelGGL(( ker_gpt_embedding<__half>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(hidden_size / 2), 0, stream,
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template void ker_gpt_embedding_launcher<float>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int* real_seq_len, int padding_id, int pos_offset);
template void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset);
/**
@brief: ker_correlation_softmax_gpt
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * batch_seq_len
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, batch_seq_len, batch_seq_len]
real_seq_len: [batch_size]
*/
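// Masking sketch: blockIdx.y encodes (head, query position) and threadIdx.x is
// the key position; keys to the right of the query are masked out, so e.g.
// query position 2 only attends over key positions 0..2 before the block-wide
// max/sum reductions normalize the row.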
template <typename T>
__global__ void ker_correlation_softmax_gpt(T* correlation,
const int* real_seq_len) {
int query_token_pos = blockIdx.y % blockDim.x;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_correlation_softmax_gpt_launcher(int batch_size, int batch_seq_len,
int head_num, hipStream_t stream,
T* correlation,
const int* real_seq_len) {
hipLaunchKernelGGL(( ker_correlation_softmax_gpt<T>)
, dim3(dim3(batch_size, head_num * batch_seq_len)), dim3(batch_seq_len), 0,
stream, correlation, real_seq_len);
}
template void ker_correlation_softmax_gpt_launcher<float>(
int batch_size, int batch_seq_len, int head_num, hipStream_t stream,
float* correlation, const int* real_seq_len);
template void ker_correlation_softmax_gpt_launcher<__half>(
int batch_size, int batch_seq_len, int head_num, hipStream_t stream,
__half* correlation, const int* real_seq_len);
/**
@brief: ker_attention_mask_weights
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * dst_seq_len
blockDim.x = src_seq_len
@param
correlation: [batch_size, head_num, dst_seq_len, src_seq_len]
real_seq_len: [batch_size]
*/
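// Difference from ker_correlation_softmax_gpt: the query position is shifted by
// (src_seq_len - dst_seq_len), so when dst_seq_len == 1 during incremental
// decoding the single new query can attend to every cached key position.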
template <typename T>
__global__ void ker_attention_mask_weights(T* correlation,
const int* real_seq_len,
int dst_seq_len, int src_seq_len) {
int query_token_pos = blockIdx.y % dst_seq_len + src_seq_len - dst_seq_len;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_attention_mask_weights_launcher(int batch_size, int dst_seq_len,
int src_seq_len, int head_num,
hipStream_t stream, T* correlation,
const int* real_seq_len) {
hipLaunchKernelGGL(( ker_attention_mask_weights<T>)
, dim3(dim3(batch_size, head_num * dst_seq_len)), dim3(src_seq_len), 0, stream,
correlation, real_seq_len, dst_seq_len, src_seq_len);
}
template void ker_attention_mask_weights_launcher<float>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
hipStream_t stream, float* correlation, const int* real_seq_len);
template void ker_attention_mask_weights_launcher<__half>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
hipStream_t stream, __half* correlation, const int* real_seq_len);
/**
@brief: ker_arrange_qkv_with_cache
split and reshape ori_qkv matrix into new_q, new_k, new_v during encoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = 3
blockDim.x = hidden_size
@param
ori_qkv: [batch_size, 1, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_q: [batch_size, head_num, 1, dim_per_head]
max_batch_dim: max_batch_size * max_seq_len * hidden_size
batch_seq_len: the sequence length of the current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
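// Cache-update sketch: for every token except the newest one the kernel only
// copies the previously cached K/V entries (stored with sequence length
// batch_seq_len - 1) into the batch_seq_len layout and skips Q; only the last
// token reads ori_qkv, adds the bias, and appends fresh Q (length 1), K and V.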
template <typename T>
__global__ void ker_arrange_qkv_with_cache(const T* ori_qkv, const T* qkv_bias,
T* new_q, T* new_k, T* k_cache,
T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
T new_val;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = k_cache[old_target_id];
if (blockIdx.y == 2) new_val = v_cache[old_target_id];
} else {
new_val = ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x] +
__ldg(&qkv_bias[blockIdx.y * blockDim.x + threadIdx.x]);
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) new_q[target_id] = new_val;
if (blockIdx.y == 1) new_k[target_id] = new_val;
if (blockIdx.y == 2) new_v[target_id] = new_val;
}
template <>
__global__ void ker_arrange_qkv_with_cache<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
half2 new_val;
const half2* p_ori_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
const half2* p_k_cache = (const half2*)k_cache;
const half2* p_v_cache = (const half2*)v_cache;
half2* p_new_q = (half2*)new_q;
half2* p_new_k = (half2*)new_k;
half2* p_new_v = (half2*)new_v;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = p_k_cache[old_target_id];
if (blockIdx.y == 2) new_val = p_v_cache[old_target_id];
} else {
new_val =
__hadd2(p_ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x],
__ldg(&p_bias[blockIdx.y * blockDim.x + threadIdx.x]));
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) p_new_q[target_id] = new_val;
if (blockIdx.y == 1) p_new_k[target_id] = new_val;
if (blockIdx.y == 2) p_new_v[target_id] = new_val;
}
template <typename T>
void ker_arrange_qkv_with_cache_launcher(int batch_token_num, int hidden_size,
hipStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_q, T* new_k,
T* k_cache, T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
hipLaunchKernelGGL(( ker_arrange_qkv_with_cache<T>)
, dim3(dim3(batch_token_num, 3)), dim3(hidden_size), 0, stream,
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim, batch_seq_len, dim_per_head, head_num);
}
template <>
void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
hipLaunchKernelGGL(( ker_arrange_qkv_with_cache<__half>)
, dim3(dim3(batch_token_num, 3)), dim3(hidden_size / 2), 0, stream,
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim / 2, batch_seq_len, dim_per_head / 2, head_num);
}
template void ker_arrange_qkv_with_cache_launcher<float>(
int batch_token_num, int hidden_size, hipStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_q, float* new_k,
float* k_cache, float* new_v, float* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
template void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, hipStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
/**
@brief: ker_ppl
compute ppl from logits
ppl = - (1 / n) * sum(log p(x_i | x_1 ... x_{i-1}))
one thread block computes the log probability of one given token
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
logits: [batch_size, batch_seq_len, vocab_size]
input_ids: [batch_size, batch_seq_len]
real_seq_len: [batch_size]
ppl: [batch_size]
*/
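// Numerically this is a block-wide log-sum-exp: reduce the max logit, sum
// exp(logit - max), then one thread atomically adds
// -(logit[next_token] - max - log(sum_exp)) / seq_len into ppl[batch].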
template <typename T>
__global__ void ker_ppl(const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
int seq_len = real_seq_len[blockIdx.x]; // remove "eos"
if (blockIdx.y >= seq_len) {
// will not contribute to ppl
return;
}
int token_idx_in_batch = blockIdx.x * gridDim.y + blockIdx.y;
int left_logit_idx = token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (token_idx_in_batch + 1) * vocab_size;
/*
step 1. find max logit over the whole vocab
*/
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
max_logit = blockReduceMax(max_logit);
__shared__ float s_max_logit;
if (threadIdx.x == 0) {
s_max_logit = max_logit;
}
__syncthreads();
/*
step 2. compute the log probability for the given token,
add it to the sequence's ppl
*/
float sum_exp_logit = 0.f;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float lgt = fmaxf((float)logits[idx] - s_max_logit, logit_thresh_min);
sum_exp_logit += expf(lgt);
}
sum_exp_logit = blockReduceSum(sum_exp_logit);
if (threadIdx.x == 0) {
int token_id = input_ids[token_idx_in_batch + 1];
float log_prob =
((float)logits[token_idx_in_batch * vocab_size + token_id] -
s_max_logit - logf(sum_exp_logit)) /
(float)seq_len;
atomicAdd(ppl + blockIdx.x, -log_prob);
}
}
template <typename T>
void ker_ppl_launcher(int batch_size, int batch_seq_len,
int max_thread_per_block, hipStream_t stream,
const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
hipLaunchKernelGGL(( ker_ppl<T>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(max_thread_per_block), 0, stream,
logits, input_ids, real_seq_len, ppl, vocab_size);
}
template void ker_ppl_launcher<float>(int batch_size, int batch_seq_len,
int max_thread_per_block,
hipStream_t stream, const float* logits,
const int* input_ids,
const int* real_seq_len, float* ppl,
int vocab_size);
template void ker_ppl_launcher<__half>(
int batch_size, int batch_seq_len, int max_thread_per_block,
hipStream_t stream, const __half* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size);
/**
@brief: ker_topk_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
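// Sampling sketch: each block handles one sequence; it estimates the K-th
// largest logit with blockRoughTopK, keeps at most one surviving logit per
// thread, and draws a single token from the renormalized top-K probabilities
// using one hiprand_uniform value and an inclusive prefix sum (hipcub BlockScan).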
template <typename T, int k>
__global__ void ker_topk_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished,
hiprandState_t* curandstate, int eos_id) {
int last_token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[last_token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
// blockIdx.x * (batch_seq_len+1) + batch_seq_len
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit and rough Kth logit over the whole vocab
*/
__shared__ float s_max_logit, s_topk_logit;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
rough_top_kth_logit = fmaxf(rough_top_kth_logit, (float)logits[idx]);
}
float max_logit = blockReduceMax(rough_top_kth_logit);
rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_topk_logit = rough_top_kth_logit;
s_max_logit = max_logit;
}
__syncthreads();
/* step2: hold at most one logit per thread that is larger than the Kth logit and
 * sample from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_topk_logit) test_num++;
if (logit >= s_topk_logit && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
typedef hipcub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = hiprand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
/* if the newly sampled tid is not EOS, set unfinished to TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topk_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
hipStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id) {
if (k == 1)
hipLaunchKernelGGL(( ker_topk_sample<T, 1>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 2)
hipLaunchKernelGGL(( ker_topk_sample<T, 2>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 4)
hipLaunchKernelGGL(( ker_topk_sample<T, 4>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 8)
hipLaunchKernelGGL(( ker_topk_sample<T, 8>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 16)
hipLaunchKernelGGL(( ker_topk_sample<T, 16>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 32)
hipLaunchKernelGGL(( ker_topk_sample<T, 32>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else {
throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]");
}
}
template void ker_topk_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id);
template void ker_topk_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
hiprandState_t* curandstate, int eos_id);
/**
@brief: ker_topp_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
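// Nucleus-sampling sketch (intent, roughly): per-thread max logits are sorted
// descending and prefix-summed so the logit where the cumulative probability
// mass first reaches p becomes the threshold; sampling over the surviving
// logits then follows the same prefix-sum scheme as the top-k kernel above.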
template <typename T>
__global__ void ker_topp_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished, float p,
hiprandState_t* curandstate, int eos_id) {
int token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit in each thread and sample from these probs with nucleus
sampling
*/
__shared__ float s_max_logit;
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
float max_logit_array[1];
max_logit_array[0] = max_logit;
typedef hipcub::BlockRadixSort<float, 1024, 1> BlockRadixSort;
__shared__ typename BlockRadixSort::TempStorage sort_temp_storage;
BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array);
float presum_max_logit_exp;
max_logit = max_logit_array[0];
float block_max_logit = blockReduceMax(max_logit);
if (threadIdx.x == 0) {
s_max_logit = block_max_logit;
}
__syncthreads();
float biased_logit_exp =
expf(fmaxf(max_logit - s_max_logit, logit_thresh_min));
typedef hipcub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage presum_temp_storage;
BlockScan(presum_temp_storage)
.InclusiveSum(biased_logit_exp, presum_max_logit_exp);
float topp_exp_threshold;
if (threadIdx.x == blockDim.x - 1) {
topp_exp_threshold = p * presum_max_logit_exp;
}
__shared__ float s_presum_logit_exp_threshold;
if (presum_max_logit_exp > topp_exp_threshold) {
presum_max_logit_exp = CUDA_FLOAT_INF_NEG;
}
float logit_exp_threshold = blockReduceMax(presum_max_logit_exp);
if (threadIdx.x == 0) {
s_presum_logit_exp_threshold = logit_exp_threshold;
}
__syncthreads();
__shared__ float s_logit_threshold;
if (presum_max_logit_exp == s_presum_logit_exp_threshold) {
s_logit_threshold = max_logit;
}
__syncthreads();
/* step2 hold one logit per thread and sample
* from them */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_logit_threshold) test_num++;
if (logit >= s_logit_threshold && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = hiprand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
/* if the newly sampled tid is not EOS, set unfinished to TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topp_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
hipStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id) {
hipLaunchKernelGGL(( ker_topp_sample<T>), dim3(batch_size), dim3(max_thread_per_block), 0, stream,
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id);
}
template void ker_topp_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id);
template void ker_topp_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, hipStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
hiprandState_t* curandstate, int eos_id);
} // namespace cuda
} // namespace lightseq
| 67e9e759dc4b46d252f4249e33df431ca342d008.cu | #include <random>
// #include "3rdparty/cub-1.8.0/cub/cub.cuh"
#include "common.h"
#include "gptKernels.h"
#include "transformerKernels.h"
/**
@file
Implements the CUDA kernel functions and their launchers
required by the GPT model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: ker_gpt_embedding
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = token_seq_len
blockDim.x = hidden_size
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, token_seq_len]
output: result, [batch_size, token_seq_len, hidden_size]
real_seq_len: record seq len exclude padding, [batch_size]
padding_id, the padding_id, default 0
pos_offset: get real pos when decoding which gridDim.y=1
*/
template <typename T>
__global__ void ker_gpt_embedding(const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
if (tid == padding_id) {
// for padding id
output[target_pos * blockDim.x + threadIdx.x] = 0.f;
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
output[target_pos * blockDim.x + threadIdx.x] =
token_emb[tid * blockDim.x + threadIdx.x] +
pos_emb[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x];
}
/* fp16 version */
template <>
__global__ void ker_gpt_embedding<__half>(const __half* token_emb,
const __half* pos_emb,
const int* token_id, __half* output,
int* real_seq_len, int padding_id,
int pos_offset) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int tid = token_id[target_pos];
half2* output_h = (half2*)output;
if (tid == padding_id) {
// for padding id
output_h[target_pos * blockDim.x + threadIdx.x] = __float2half2_rn(0.f);
return;
}
if (threadIdx.x == 0) {
atomicAdd(real_seq_len + blockIdx.x, 1);
}
float2 te =
__half22float2(((const half2*)token_emb)[tid * blockDim.x + threadIdx.x]);
float2 pe = __half22float2(
((const half2*)
pos_emb)[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x]);
te.x += pe.x;
te.y += pe.y;
output_h[target_pos * blockDim.x + threadIdx.x] = __float22half2_rn(te);
}
template <typename T>
void ker_gpt_embedding_launcher(int batch_size, int batch_seq_len,
int hidden_size, cudaStream_t stream,
const T* token_emb, const T* pos_emb,
const int* token_id, T* output,
int* real_seq_len, int padding_id,
int pos_offset) {
ker_gpt_embedding<T>
<<<dim3(batch_size, batch_seq_len), hidden_size, 0, stream>>>(
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template <>
void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset) {
ker_gpt_embedding<__half>
<<<dim3(batch_size, batch_seq_len), hidden_size / 2, 0, stream>>>(
token_emb, pos_emb, token_id, output, real_seq_len, padding_id,
pos_offset);
}
template void ker_gpt_embedding_launcher<float>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const float* token_emb, const float* pos_emb, const int* token_id,
float* output, int* real_seq_len, int padding_id, int pos_offset);
template void ker_gpt_embedding_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
const __half* token_emb, const __half* pos_emb, const int* token_id,
__half* output, int* real_seq_len, int padding_id, int pos_offset);
/**
@brief: ker_correlation_softmax_gpt
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * batch_seq_len
blockDim.x = batch_seq_len
@param
correlation: [batch_size, head_num, batch_seq_len, batch_seq_len]
real_seq_len: [batch_size]
*/
template <typename T>
__global__ void ker_correlation_softmax_gpt(T* correlation,
const int* real_seq_len) {
int query_token_pos = blockIdx.y % blockDim.x;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_correlation_softmax_gpt_launcher(int batch_size, int batch_seq_len,
int head_num, cudaStream_t stream,
T* correlation,
const int* real_seq_len) {
ker_correlation_softmax_gpt<T>
<<<dim3(batch_size, head_num * batch_seq_len), batch_seq_len, 0,
stream>>>(correlation, real_seq_len);
}
template void ker_correlation_softmax_gpt_launcher<float>(
int batch_size, int batch_seq_len, int head_num, cudaStream_t stream,
float* correlation, const int* real_seq_len);
template void ker_correlation_softmax_gpt_launcher<__half>(
int batch_size, int batch_seq_len, int head_num, cudaStream_t stream,
__half* correlation, const int* real_seq_len);
/**
@brief: ker_attention_mask_weights
query-key correlation softmax for encoder self attention
@thread
gridDim.x = batch_size
gridDim.y = head_num * dst_seq_len
blockDim.x = src_seq_len
@param
correlation: [batch_size, head_num, dst_seq_len, src_seq_len]
real_seq_len: [batch_size]
*/
template <typename T>
__global__ void ker_attention_mask_weights(T* correlation,
const int* real_seq_len,
int dst_seq_len, int src_seq_len) {
int query_token_pos = blockIdx.y % dst_seq_len + src_seq_len - dst_seq_len;
if (query_token_pos >= real_seq_len[blockIdx.x]) {
return;
}
int mask = 0; // can see the token when mask=0
if (threadIdx.x > query_token_pos) {
mask = 1; // Can only see the token on the left side of it
}
int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
float val = (float)correlation[idx];
float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val);
__shared__ float smax;
if (threadIdx.x == 0) smax = max_val;
__syncthreads();
val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax));
float rsum = blockReduceSum<float>(val);
__shared__ float ssum;
if (threadIdx.x == 0) ssum = rsum;
__syncthreads();
correlation[idx] = (T)(val / (ssum + epsilon));
}
template <typename T>
void ker_attention_mask_weights_launcher(int batch_size, int dst_seq_len,
int src_seq_len, int head_num,
cudaStream_t stream, T* correlation,
const int* real_seq_len) {
ker_attention_mask_weights<T>
<<<dim3(batch_size, head_num * dst_seq_len), src_seq_len, 0, stream>>>(
correlation, real_seq_len, dst_seq_len, src_seq_len);
}
template void ker_attention_mask_weights_launcher<float>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
cudaStream_t stream, float* correlation, const int* real_seq_len);
template void ker_attention_mask_weights_launcher<__half>(
int batch_size, int dst_seq_len, int src_seq_len, int head_num,
cudaStream_t stream, __half* correlation, const int* real_seq_len);
/**
@brief: ker_arrange_qkv_with_cache
split and reshape ori_qkv matrix into new_q, new_k, new_v during encoder
self-attention
ori_qkv is the result of gemm
@thread
gridDim.x = batch_size * batch_seq_len
gridDim.y = 3
blockDim.x = hidden_size
@param
ori_qkv: [batch_size, 1, 3, hidden_size]
qkv_bias: [3, hidden_size]
new_q: [batch_size, head_num, 1, dim_per_head]
max_batch_dim: max_batch_size * max_seq_len * hidden_size
batch_seq_len: the sequence length of the current batch
dim_per_head: dim of one head in multi-head attention
head_num: head number in multi-head attention
*/
template <typename T>
__global__ void ker_arrange_qkv_with_cache(const T* ori_qkv, const T* qkv_bias,
T* new_q, T* new_k, T* k_cache,
T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
T new_val;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = k_cache[old_target_id];
if (blockIdx.y == 2) new_val = v_cache[old_target_id];
} else {
new_val = ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x] +
__ldg(&qkv_bias[blockIdx.y * blockDim.x + threadIdx.x]);
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) new_q[target_id] = new_val;
if (blockIdx.y == 1) new_k[target_id] = new_val;
if (blockIdx.y == 2) new_v[target_id] = new_val;
}
template <>
__global__ void ker_arrange_qkv_with_cache<__half>(
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
int batch_id = blockIdx.x / batch_seq_len;
int token_id = blockIdx.x % batch_seq_len;
int head_id = threadIdx.x / dim_per_head;
int dim_id = threadIdx.x % dim_per_head;
int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len, dim_per_head);
half2 new_val;
const half2* p_ori_qkv = (const half2*)ori_qkv;
const half2* p_bias = (const half2*)qkv_bias;
const half2* p_k_cache = (const half2*)k_cache;
const half2* p_v_cache = (const half2*)v_cache;
half2* p_new_q = (half2*)new_q;
half2* p_new_k = (half2*)new_k;
half2* p_new_v = (half2*)new_v;
if (token_id < batch_seq_len - 1) {
int old_target_id =
targetid_4dim(batch_id, head_id, token_id, dim_id, head_num,
batch_seq_len - 1, dim_per_head);
if (blockIdx.y == 0) return;
if (blockIdx.y == 1) new_val = p_k_cache[old_target_id];
if (blockIdx.y == 2) new_val = p_v_cache[old_target_id];
} else {
new_val =
__hadd2(p_ori_qkv[(batch_id * gridDim.y + blockIdx.y) * blockDim.x +
threadIdx.x],
__ldg(&p_bias[blockIdx.y * blockDim.x + threadIdx.x]));
if (blockIdx.y == 0) {
target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1,
dim_per_head);
}
}
if (blockIdx.y == 0) p_new_q[target_id] = new_val;
if (blockIdx.y == 1) p_new_k[target_id] = new_val;
if (blockIdx.y == 2) p_new_v[target_id] = new_val;
}
template <typename T>
void ker_arrange_qkv_with_cache_launcher(int batch_token_num, int hidden_size,
cudaStream_t stream, const T* ori_qkv,
const T* qkv_bias, T* new_q, T* new_k,
T* k_cache, T* new_v, T* v_cache,
int max_batch_dim, int batch_seq_len,
int dim_per_head, int head_num) {
ker_arrange_qkv_with_cache<T>
<<<dim3(batch_token_num, 3), hidden_size, 0, stream>>>(
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim, batch_seq_len, dim_per_head, head_num);
}
template <>
void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num) {
ker_arrange_qkv_with_cache<__half>
<<<dim3(batch_token_num, 3), hidden_size / 2, 0, stream>>>(
ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache,
max_batch_dim / 2, batch_seq_len, dim_per_head / 2, head_num);
}
template void ker_arrange_qkv_with_cache_launcher<float>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const float* ori_qkv, const float* qkv_bias, float* new_q, float* new_k,
float* k_cache, float* new_v, float* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
template void ker_arrange_qkv_with_cache_launcher<__half>(
int batch_token_num, int hidden_size, cudaStream_t stream,
const __half* ori_qkv, const __half* qkv_bias, __half* new_q, __half* new_k,
__half* k_cache, __half* new_v, __half* v_cache, int max_batch_dim,
int batch_seq_len, int dim_per_head, int head_num);
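/*
  Illustrative usage sketch (not part of the original source): a hedged example
  of how ker_arrange_qkv_with_cache_launcher might be driven from host code for
  one decoding step. The buffer names (d_ori_qkv, d_qkv_bias, d_new_q, d_new_k,
  d_k_cache, d_new_v, d_v_cache) and the dimension variables are hypothetical
  placeholders; batch_token_num equals batch_size here because ori_qkv holds
  one new token per sequence ([batch_size, 1, 3, hidden_size]).

    int dim_per_head = hidden_size / head_num;
    ker_arrange_qkv_with_cache_launcher<float>(
        batch_size, hidden_size, stream, d_ori_qkv, d_qkv_bias,
        d_new_q, d_new_k, d_k_cache, d_new_v, d_v_cache,
        max_batch_size * max_seq_len * hidden_size, batch_seq_len,
        dim_per_head, head_num);
*/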
/**
@brief: ker_ppl
compute ppl from logit
ppl = - (1 / n) * sum(log p(token_i | token_<i))
each thread block computes the log probability of one token
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
logits: [batch_size, batch_seq_len, vocab_size]
input_ids: [batch_size, batch_seq_len]
real_seq_len: [batch_size]
ppl: [batch_size]
*/
template <typename T>
__global__ void ker_ppl(const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
int seq_len = real_seq_len[blockIdx.x]; // remove "eos"
if (blockIdx.y >= seq_len) {
// will not contribute to ppl
return;
}
int token_idx_in_batch = blockIdx.x * gridDim.y + blockIdx.y;
int left_logit_idx = token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (token_idx_in_batch + 1) * vocab_size;
/*
step 1. find max logit over the whole vocab
*/
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
max_logit = blockReduceMax(max_logit);
__shared__ float s_max_logit;
if (threadIdx.x == 0) {
s_max_logit = max_logit;
}
__syncthreads();
/*
step 2. compute the log probability for the given token,
add it to the sequence's ppl
*/
float sum_exp_logit = 0.f;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float lgt = fmaxf((float)logits[idx] - s_max_logit, logit_thresh_min);
sum_exp_logit += expf(lgt);
}
sum_exp_logit = blockReduceSum(sum_exp_logit);
if (threadIdx.x == 0) {
int token_id = input_ids[token_idx_in_batch + 1];
float log_prob =
((float)logits[token_idx_in_batch * vocab_size + token_id] -
s_max_logit - logf(sum_exp_logit)) /
(float)seq_len;
atomicAdd(ppl + blockIdx.x, -log_prob);
}
}
template <typename T>
void ker_ppl_launcher(int batch_size, int batch_seq_len,
int max_thread_per_block, cudaStream_t stream,
const T* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size) {
ker_ppl<T>
<<<dim3(batch_size, batch_seq_len), max_thread_per_block, 0, stream>>>(
logits, input_ids, real_seq_len, ppl, vocab_size);
}
template void ker_ppl_launcher<float>(int batch_size, int batch_seq_len,
int max_thread_per_block,
cudaStream_t stream, const float* logits,
const int* input_ids,
const int* real_seq_len, float* ppl,
int vocab_size);
template void ker_ppl_launcher<__half>(
int batch_size, int batch_seq_len, int max_thread_per_block,
cudaStream_t stream, const __half* logits, const int* input_ids,
const int* real_seq_len, float* ppl, int vocab_size);
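/*
  Illustrative usage sketch (not part of the original source): a hedged example
  of calling ker_ppl_launcher. Buffer names are hypothetical placeholders; the
  ppl buffer must be zeroed first because the kernel accumulates per-token
  contributions with atomicAdd. Converting the accumulated value to perplexity
  with expf() on the host is an assumption about how callers consume it.

    cudaMemsetAsync(d_ppl, 0, batch_size * sizeof(float), stream);
    ker_ppl_launcher<float>(batch_size, batch_seq_len, max_thread_per_block,
                            stream, d_logits, d_input_ids, d_real_seq_len,
                            d_ppl, vocab_size);
    // after copying d_ppl back to the host: perplexity[i] = expf(h_ppl[i]);
*/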
/**
@brief: ker_topk_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T, int k>
__global__ void ker_topk_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished,
curandState* curandstate, int eos_id) {
int last_token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[last_token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
// blockIdx.x * (batch_seq_len+1) + batch_seq_len
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit and rough Kth logit over the whole vocab
*/
__shared__ float s_max_logit, s_topk_logit;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
rough_top_kth_logit = fmaxf(rough_top_kth_logit, (float)logits[idx]);
}
float max_logit = blockReduceMax(rough_top_kth_logit);
rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_topk_logit = rough_top_kth_logit;
s_max_logit = max_logit;
}
__syncthreads();
/* step2: each thread keeps one logit that is at least the rough Kth logit,
 * then a token is sampled from these candidates */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_topk_logit) test_num++;
if (logit >= s_topk_logit && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
typedef cub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = curand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
  /* if the newly sampled tid is not EOS, set the unfinished flag to TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topk_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
cudaStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const int k, int* unfinished,
curandState* curandstate, int eos_id) {
if (k == 1)
ker_topk_sample<T, 1><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 2)
ker_topk_sample<T, 2><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 4)
ker_topk_sample<T, 4><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 8)
ker_topk_sample<T, 8><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 16)
ker_topk_sample<T, 16><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else if (k == 32)
ker_topk_sample<T, 32><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id);
else {
throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]");
}
}
template void ker_topk_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
curandState* curandstate, int eos_id);
template void ker_topk_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const int k, int* unfinished,
curandState* curandstate, int eos_id);
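/*
  Illustrative usage sketch (not part of the original source): a hedged example
  of one top-k decoding step. Buffer names (d_logits, d_old_ids, d_new_ids,
  d_seq_len, d_unfinished, d_curand) are hypothetical placeholders. A typical
  loop clears the unfinished flag, samples, then reads the flag back to decide
  whether to keep generating.

    cudaMemsetAsync(d_unfinished, 0, sizeof(int), stream);
    ker_topk_sample_launcher<float>(
        batch_size, batch_seq_len, logits_seq_len, max_thread_per_block, stream,
        d_logits, d_old_ids, d_new_ids, d_seq_len, vocab_size,
        8, d_unfinished, d_curand, eos_id);  // k = 8; k must be 1, 2, 4, 8, 16 or 32
*/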
/**
@brief: ker_topp_sample
@thread
gridDim.x = batch_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, logits_seq_len, vocab_size]
old_input_ids: [batch_size, batch_seq_len]
new_input_ids: [batch_size, batch_seq_len+1]
real_seq_len: [batch_size]
unfinished: [1]
curandstate: [batch_size]
*/
template <typename T>
__global__ void ker_topp_sample(const T* logits, int* old_input_ids,
int* new_input_ids, const int* real_seq_len,
const int vocab_size, const int batch_seq_len,
int logits_seq_len, int* unfinished, float p,
curandState* curandstate, int eos_id) {
int token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1;
/* add EOS to end if last token is EOS */
if (old_input_ids[token_idx_in_batch] == eos_id) {
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id;
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id;
}
return;
}
int logits_token_idx_in_batch =
blockIdx.x * logits_seq_len + logits_seq_len - 1;
int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x;
int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size;
/*
step1. find max logit in each thread and sample from these probs with nucleus
sampling
*/
__shared__ float s_max_logit;
float max_logit = CUDA_FLOAT_INF_NEG;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
max_logit = fmaxf(max_logit, (float)logits[idx]);
}
float max_logit_array[1];
max_logit_array[0] = max_logit;
typedef cub::BlockRadixSort<float, 1024, 1> BlockRadixSort;
__shared__ typename BlockRadixSort::TempStorage sort_temp_storage;
BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array);
float presum_max_logit_exp;
max_logit = max_logit_array[0];
float block_max_logit = blockReduceMax(max_logit);
if (threadIdx.x == 0) {
s_max_logit = block_max_logit;
}
__syncthreads();
float biased_logit_exp =
expf(fmaxf(max_logit - s_max_logit, logit_thresh_min));
typedef cub::BlockScan<float, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage presum_temp_storage;
BlockScan(presum_temp_storage)
.InclusiveSum(biased_logit_exp, presum_max_logit_exp);
float topp_exp_threshold;
if (threadIdx.x == blockDim.x - 1) {
topp_exp_threshold = p * presum_max_logit_exp;
}
__shared__ float s_presum_logit_exp_threshold;
if (presum_max_logit_exp > topp_exp_threshold) {
presum_max_logit_exp = CUDA_FLOAT_INF_NEG;
}
float logit_exp_threshold = blockReduceMax(presum_max_logit_exp);
if (threadIdx.x == 0) {
s_presum_logit_exp_threshold = logit_exp_threshold;
}
__syncthreads();
__shared__ float s_logit_threshold;
if (presum_max_logit_exp == s_presum_logit_exp_threshold) {
s_logit_threshold = max_logit;
}
__syncthreads();
/* step2: each thread keeps one logit that passes the threshold from step1,
 * then a token is sampled from these candidates */
float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG;
int topk_tid = vocab_size;
int test_num = 0;
__shared__ float s_topk_exp_sum;
for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) {
float logit = (float)logits[idx];
float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min));
if (logit >= s_logit_threshold) test_num++;
if (logit >= s_logit_threshold && logit_exp > topk_exp) {
topk_exp = logit_exp;
topk_tid = idx - left_logit_idx + threadIdx.x;
}
}
test_num = blockReduceSum(test_num);
if (topk_tid == vocab_size) topk_exp = 0;
topk_exp_sum = blockReduceSum(topk_exp);
if (threadIdx.x == 0) {
s_topk_exp_sum = topk_exp_sum;
}
__syncthreads();
/* calculate cumulative probability */
float topk_prob = topk_exp / s_topk_exp_sum;
float prefix_sum_prob;
BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob);
__shared__ float random_x;
if (threadIdx.x == 0) {
random_x = curand_uniform(curandstate + blockIdx.x);
}
__syncthreads();
__shared__ int s_tid;
if (threadIdx.x == 0) {
s_tid = vocab_size;
}
__syncthreads();
int threadID = threadIdx.x;
__shared__ int s_threadID;
__shared__ float s_max_prob;
if (random_x > prefix_sum_prob) threadID = blockDim.x;
threadID = blockReduceMin(threadID);
float max_prob = blockReduceMax(topk_prob);
if (threadIdx.x == 0) {
s_threadID = threadID;
s_max_prob = max_prob;
}
__syncthreads();
if (threadIdx.x == s_threadID) {
s_tid = topk_tid;
}
__syncthreads();
if (s_tid == vocab_size && topk_prob == s_max_prob) {
s_tid = topk_tid;
}
__syncthreads();
  /* if the newly sampled tid is not EOS, set the unfinished flag to TRUE */
if (threadIdx.x == 0) {
if (s_tid != eos_id) unfinished[0] = 1;
}
/* step3 copy old_input_ids to new_input_ids and add new sampled ids */
int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x;
int right_token_idx = (blockIdx.x + 1) * batch_seq_len;
for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) {
int new_idx = idx + blockIdx.x;
new_input_ids[new_idx] = old_input_ids[idx];
}
if (threadIdx.x == 0) {
new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid;
// save the newly sampled ids to old_input_ids for next step inputs
old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid;
}
}
template <typename T>
void ker_topp_sample_launcher(int batch_size, int batch_seq_len,
int logits_seq_len, int max_thread_per_block,
cudaStream_t stream, const T* logits,
int* old_input_ids, int* new_input_ids,
const int* real_seq_len, const int vocab_size,
const float p, int* unfinished,
curandState* curandstate, int eos_id) {
ker_topp_sample<T><<<batch_size, max_thread_per_block, 0, stream>>>(
logits, old_input_ids, new_input_ids, real_seq_len, vocab_size,
batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id);
}
template void ker_topp_sample_launcher<float>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const float* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
curandState* curandstate, int eos_id);
template void ker_topp_sample_launcher<__half>(
int batch_size, int batch_seq_len, int logits_seq_len,
int max_thread_per_block, cudaStream_t stream, const __half* logits,
int* old_input_ids, int* new_input_idx, const int* real_seq_len,
const int vocab_size, const float p, int* unfinished,
curandState* curandstate, int eos_id);
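/*
  Illustrative usage sketch (not part of the original source): the nucleus
  (top-p) counterpart of the top-k example above; buffer names are hypothetical
  placeholders and p is the cumulative probability threshold.

    cudaMemsetAsync(d_unfinished, 0, sizeof(int), stream);
    ker_topp_sample_launcher<float>(
        batch_size, batch_seq_len, logits_seq_len, max_thread_per_block, stream,
        d_logits, d_old_ids, d_new_ids, d_seq_len, vocab_size,
        0.9f, d_unfinished, d_curand, eos_id);  // p = 0.9
*/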
} // namespace cuda
} // namespace lightseq
|
51dedfcecde88442d98a547bb9f1c257ae48ef02.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cblas.h>
#include <hipblas.h>
//##########################################################
// In code timer function
//##########################################################
void init_timing(struct timeval* tstart)
{
gettimeofday(tstart, NULL);
}
float ellapsed_time(struct timeval tstart)
{
struct timeval tmp;
long long diff;
gettimeofday(&tmp, NULL);
diff = tmp.tv_usec - tstart.tv_usec;
diff += (tmp.tv_sec - tstart.tv_sec) * 1000000;
return ((float)diff*1.0e-6);
}
//##########################################################
extern "C"
{
void blas_fct(void);
}
void blas_fct(void)
{
int i;
int N = 8192;
float *A, *B, *C;
float *d_A, *d_B, *d_C;
float alpha = 1.0, beta=0.0;
struct timeval timer;
hipblasHandle_t cu_handle;
if(hipblasCreate(&cu_handle) != HIPBLAS_STATUS_SUCCESS)
{
printf("GPU handle create fail\n");
exit(EXIT_FAILURE);
}
A = (float*) malloc(N*N*sizeof(float));
B = (float*) malloc(N*N*sizeof(float));
C = (float*) malloc(N*N*sizeof(float));
for(i = 0; i < N*N; i++)
{
A[i] = (i%50) * 0.1;
B[i] = (i%25) * 1.3;
C[i] = 0.0;
}
hipMalloc(&d_A, N*N*sizeof(float));
hipMalloc(&d_B, N*N*sizeof(float));
hipMalloc(&d_C, N*N*sizeof(float));
hipMemcpy(d_A, A, N*N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, N*N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, N*N*sizeof(float), hipMemcpyHostToDevice);
init_timing(&timer);
cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N);
printf("CPU time : %f\n", ellapsed_time(timer));
printf("%f\n", C[N*N/2 + N/2]);
for(i = 0; i < N*N; i++)
{
C[i] = 0.0;
}
init_timing(&timer);
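	/* hipblasSgemm is asynchronous with respect to the host, so the
	   hipDeviceSynchronize() below must complete before the timer is stopped
	   in order to measure the full GPU execution time. */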
hipblasSgemm(cu_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
hipDeviceSynchronize();
printf("CUDA time : %f\n", ellapsed_time(timer));
hipMemcpy(C, d_C, N*N*sizeof(float), hipMemcpyDeviceToHost);
printf("%f\n", C[N*N/2 + N/2]);
free(A);
free(B);
free(C);
}
| 51dedfcecde88442d98a547bb9f1c257ae48ef02.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cblas.h>
#include <cublas_v2.h>
//##########################################################
// In code timer function
//##########################################################
void init_timing(struct timeval* tstart)
{
gettimeofday(tstart, NULL);
}
float ellapsed_time(struct timeval tstart)
{
struct timeval tmp;
long long diff;
gettimeofday(&tmp, NULL);
diff = tmp.tv_usec - tstart.tv_usec;
diff += (tmp.tv_sec - tstart.tv_sec) * 1000000;
return ((float)diff*1.0e-6);
}
//##########################################################
extern "C"
{
void blas_fct(void);
}
void blas_fct(void)
{
int i;
int N = 8192;
float *A, *B, *C;
float *d_A, *d_B, *d_C;
float alpha = 1.0, beta=0.0;
struct timeval timer;
cublasHandle_t cu_handle;
if(cublasCreate(&cu_handle) != CUBLAS_STATUS_SUCCESS)
{
printf("GPU handle create fail\n");
exit(EXIT_FAILURE);
}
A = (float*) malloc(N*N*sizeof(float));
B = (float*) malloc(N*N*sizeof(float));
C = (float*) malloc(N*N*sizeof(float));
for(i = 0; i < N*N; i++)
{
A[i] = (i%50) * 0.1;
B[i] = (i%25) * 1.3;
C[i] = 0.0;
}
cudaMalloc(&d_A, N*N*sizeof(float));
cudaMalloc(&d_B, N*N*sizeof(float));
cudaMalloc(&d_C, N*N*sizeof(float));
cudaMemcpy(d_A, A, N*N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, N*N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, N*N*sizeof(float), cudaMemcpyHostToDevice);
init_timing(&timer);
cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N);
printf("CPU time : %f\n", ellapsed_time(timer));
printf("%f\n", C[N*N/2 + N/2]);
for(i = 0; i < N*N; i++)
{
C[i] = 0.0;
}
init_timing(&timer);
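	/* cublasSgemm is asynchronous with respect to the host, so the
	   cudaDeviceSynchronize() below must complete before the timer is stopped
	   in order to measure the full GPU execution time. */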
cublasSgemm(cu_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
cudaDeviceSynchronize();
printf("CUDA time : %f\n", ellapsed_time(timer));
cudaMemcpy(C, d_C, N*N*sizeof(float), cudaMemcpyDeviceToHost);
printf("%f\n", C[N*N/2 + N/2]);
free(A);
free(B);
free(C);
}
|
53abcb322f07767e3989a61c5baafb49f8116a92.hip | // !!! This is a file automatically generated by hipify!!!
/**
Example of a main class doing operations on matrices. This class takes premade matrices and converts them to a 1D array for GPU operations.
**/
#include <stdio.h> /* printf, NULL */
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ISplit.h"
/////////////// MACROS and GLOBALS: //////////////
#define N 100000000
#define BLOCK_SIZE 32
#define oneGB 100000000
long gMem; int gSize[3]; int wSize; int TPB;//max threads per block
/////////////////////////////
typedef struct{
unsigned long long M;//size
unsigned int p; //partition
unsigned int overflow; //overflow
float * vec;
}VECTOR;
//////////////////////////////////////////////////////////
int CheckR(long long* a1, long long* b1, unsigned long long nm, long long* c);
int CheckI(float *vv, unsigned long s, float *&c);
void randomInit(float* &data)
{
#pragma unroll
for (int i = 0; i <= N; i++){
data[i] = rand()% (1000 + 1 - 1) + 1;
}
}
//////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv){
//Setup Check//
int Dev = 0;
hipDeviceProp_t pp;
setProp(Dev);
pp = getProp();
hipEvent_t start1,stop1;
float time1;
gpuErrchk(hipEventCreate(&start1));
gpuErrchk(hipEventCreate(&stop1));
gpuErrchk(hipEventRecord(start1,0));
gMem = pp.totalGlobalMem;
gSize[0] = pp.maxGridSize[0]; gSize[1] = pp.maxGridSize[1]; gSize[2] = pp.maxGridSize[2];
wSize = pp.warpSize;
TPB = pp.maxThreadsPerBlock;
// printf("total Global mem: %ld\n", gMem);
// printf("maxGridSize= %d,%d,%d \n",gSize[0],gSize[1],gSize[2]);
// printf("Warp Size: %d\n", wSize);
// printf(" TPB: %d\n", TPB);
//-----------------------------------------------------------
srand(356);
printf("Initialised\n");
VECTOR v;
v.M = N;
v.p =2;
v.overflow = 0;
float * c;
unsigned long byteSize = (N*sizeof(unsigned long long));
//Host
gpuErrchk(hipHostMalloc((void**)&v.vec,((v.M)*sizeof(unsigned long long)),hipHostMallocDefault));
gpuErrchk(hipHostMalloc((void**)&c,((v.M)*sizeof(unsigned long long)),hipHostMallocDefault));
randomInit(v.vec);
printf("Size of vec= %lu \n", byteSize);
printf("----------------Split up vector-------------------------\n");
/*------------Basic Generic CUDA Setup------------------- */
unsigned long long Nn = ceil(v.M / v.p);
unsigned long long bt = (long long)byteSize/v.p;
unsigned long long mem = (long long) (gMem-oneGB);
//printf("Nn=%llu, bt=%llu, mem=%llu",Nn,bt,mem);
while((bt)>mem){
v.p += 2;
bt = (long long)byteSize/v.p;
Nn = v.M/v.p;
v.overflow = v.M%v.p;
}
dim3 BLOCK(BLOCK_SIZE);
dim3 GRID(Nn+BLOCK.x-1/BLOCK.x);
//printf("GRID(%lu,%d,%d), BLOCK(%d,%d,%d)\n",GRID.x,GRID.y,GRID.z,BLOCK.x,BLOCK.y,BLOCK.z);
//printf("partition = %lu\n",v.p);
hipStream_t stream0;
hipEvent_t start,stop;
float time;
gpuErrchk(hipEventCreate(&start));
gpuErrchk(hipEventCreate(&stop));
gpuErrchk( hipStreamCreate( &stream0));
//Timer START LETS GOOO!
gpuErrchk(hipEventRecord(start,0));
//malloc
float * aC;
gpuErrchk(hipMalloc((void**)&aC, (Nn*sizeof( unsigned long long))));
//----------------------START LOOP--------------------------------//
for (unsigned long long i = 0; i <= v.M-v.overflow; i+=Nn){
gpuErrchk(hipMemcpyAsync(aC,v.vec+i,(Nn*sizeof(unsigned long long)),hipMemcpyHostToDevice,stream0));
hipLaunchKernelGGL(( Incr), dim3(GRID),dim3(BLOCK),0,stream0, aC,Nn,i);
gpuErrchk(hipMemcpyAsync(c+i,aC,(Nn*sizeof(unsigned long long)),hipMemcpyDeviceToHost,stream0)); //i = N;
}
if (v.overflow)
{
gpuErrchk(hipMemcpyAsync(aC,v.vec+(v.M-v.overflow),(v.overflow*sizeof(unsigned long long)),hipMemcpyHostToDevice,stream0));
hipLaunchKernelGGL(( Incr), dim3(GRID),dim3(BLOCK),0,stream0, aC,v.overflow,v.overflow);
gpuErrchk(hipMemcpyAsync(c+(v.M-v.overflow),aC,(v.overflow*sizeof(unsigned long long)),hipMemcpyDeviceToHost,stream0));
}
//----------------------END LOOP--------------------------------//
gpuErrchk(hipStreamSynchronize(stream0)); // Tell CPU to hold his horses and wait
hipDeviceSynchronize();
gpuErrchk(hipEventRecord(stop,0));
gpuErrchk(hipEventSynchronize(stop));
gpuErrchk(hipEventElapsedTime(&time, start, stop));
printf("Time Taken: %3.1f ms \n",time);
gpuErrchk(hipStreamDestroy(stream0));
gpuErrchk(hipEventDestroy(start));
gpuErrchk(hipEventDestroy(stop));
printf("1 Stream\n");
printf("\n freeing all vectors from memory\n");
//CheckI(v.vec,v.M,c);
gpuErrchk( hipHostFree( v.vec ) );
gpuErrchk( hipFree( aC ) );
gpuErrchk(hipEventRecord(stop1,0));
gpuErrchk(hipEventSynchronize(stop1));
gpuErrchk(hipEventElapsedTime(&time1, start1, stop1));
gpuErrchk(hipEventDestroy(stop1));
gpuErrchk(hipEventDestroy(start1));
printf("Parallel Time Taken: %3.1f ms \n",time);
printf("Full Time Taken: %6f seconds \n",time1/1000.0000);
return 0;
}
int CheckI(float * vv, unsigned long s, float *&c){
for (int i = 0; i <=s ; ++i)
{
vv[i]+=1;
if (vv[i]!=c[i])
{
printf("vv[%d]= %f, but c = %f\n",i,vv[i],c[i]);
return(0);
}
}
return (42);
}
| 53abcb322f07767e3989a61c5baafb49f8116a92.cu | /**
Example of a main class doing operations on matrices. This class takes premade matrices and converts them to a 1D array for GPU operations.
**/
#include <stdio.h> /* printf, NULL */
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <cuda.h>
#include <cuda_runtime.h>
#include "ISplit.h"
/////////////// MACROS and GLOBALS: //////////////
#define N 100000000
#define BLOCK_SIZE 32
#define oneGB 100000000
long gMem; int gSize[3]; int wSize; int TPB;//max threads per block
/////////////////////////////
typedef struct{
unsigned long long M;//size
unsigned int p; //partition
unsigned int overflow; //overflow
float * vec;
}VECTOR;
//////////////////////////////////////////////////////////
int CheckR(long long* a1, long long* b1, unsigned long long nm, long long* c);
int CheckI(float *vv, unsigned long s, float *&c);
void randomInit(float* &data)
{
#pragma unroll
for (int i = 0; i <= N; i++){
data[i] = rand()% (1000 + 1 - 1) + 1;
}
}
//////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv){
//Setup Check//
int Dev = 0;
cudaDeviceProp pp;
setProp(Dev);
pp = getProp();
cudaEvent_t start1,stop1;
float time1;
gpuErrchk(cudaEventCreate(&start1));
gpuErrchk(cudaEventCreate(&stop1));
gpuErrchk(cudaEventRecord(start1,0));
gMem = pp.totalGlobalMem;
gSize[0] = pp.maxGridSize[0]; gSize[1] = pp.maxGridSize[1]; gSize[2] = pp.maxGridSize[2];
wSize = pp.warpSize;
TPB = pp.maxThreadsPerBlock;
// printf("total Global mem: %ld\n", gMem);
// printf("maxGridSize= %d,%d,%d \n",gSize[0],gSize[1],gSize[2]);
// printf("Warp Size: %d\n", wSize);
// printf(" TPB: %d\n", TPB);
//-----------------------------------------------------------
srand(356);
printf("Initialised\n");
VECTOR v;
v.M = N;
v.p =2;
v.overflow = 0;
float * c;
unsigned long byteSize = (N*sizeof(unsigned long long));
//Host
gpuErrchk(cudaHostAlloc((void**)&v.vec,((v.M)*sizeof(unsigned long long)),cudaHostAllocDefault));
gpuErrchk(cudaHostAlloc((void**)&c,((v.M)*sizeof(unsigned long long)),cudaHostAllocDefault));
randomInit(v.vec);
printf("Size of vec= %lu \n", byteSize);
printf("----------------Split up vector-------------------------\n");
/*------------Basic Generic CUDA Setup------------------- */
unsigned long long Nn = ceil(v.M / v.p);
unsigned long long bt = (long long)byteSize/v.p;
unsigned long long mem = (long long) (gMem-oneGB);
//printf("Nn=%llu, bt=%llu, mem=%llu",Nn,bt,mem);
while((bt)>mem){
v.p += 2;
bt = (long long)byteSize/v.p;
Nn = v.M/v.p;
v.overflow = v.M%v.p;
}
dim3 BLOCK(BLOCK_SIZE);
dim3 GRID(Nn+BLOCK.x-1/BLOCK.x);
//printf("GRID(%lu,%d,%d), BLOCK(%d,%d,%d)\n",GRID.x,GRID.y,GRID.z,BLOCK.x,BLOCK.y,BLOCK.z);
//printf("partition = %lu\n",v.p);
cudaStream_t stream0;
cudaEvent_t start,stop;
float time;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
gpuErrchk( cudaStreamCreate( &stream0));
//Timer START LETS GOOO!
gpuErrchk(cudaEventRecord(start,0));
//malloc
float * aC;
gpuErrchk(cudaMalloc((void**)&aC, (Nn*sizeof( unsigned long long))));
//----------------------START LOOP--------------------------------//
for (unsigned long long i = 0; i <= v.M-v.overflow; i+=Nn){
gpuErrchk(cudaMemcpyAsync(aC,v.vec+i,(Nn*sizeof(unsigned long long)),cudaMemcpyHostToDevice,stream0));
Incr<<<GRID,BLOCK,0,stream0>>>(aC,Nn,i);
gpuErrchk(cudaMemcpyAsync(c+i,aC,(Nn*sizeof(unsigned long long)),cudaMemcpyDeviceToHost,stream0)); //i = N;
}
if (v.overflow)
{
gpuErrchk(cudaMemcpyAsync(aC,v.vec+(v.M-v.overflow),(v.overflow*sizeof(unsigned long long)),cudaMemcpyHostToDevice,stream0));
Incr<<<GRID,BLOCK,0,stream0>>>(aC,v.overflow,v.overflow);
gpuErrchk(cudaMemcpyAsync(c+(v.M-v.overflow),aC,(v.overflow*sizeof(unsigned long long)),cudaMemcpyDeviceToHost,stream0));
}
//----------------------END LOOP--------------------------------//
gpuErrchk(cudaStreamSynchronize(stream0)); // Tell CPU to hold his horses and wait
cudaDeviceSynchronize();
gpuErrchk(cudaEventRecord(stop,0));
gpuErrchk(cudaEventSynchronize(stop));
gpuErrchk(cudaEventElapsedTime(&time, start, stop));
printf("Time Taken: %3.1f ms \n",time);
gpuErrchk(cudaStreamDestroy(stream0));
gpuErrchk(cudaEventDestroy(start));
gpuErrchk(cudaEventDestroy(stop));
printf("1 Stream\n");
printf("\n freeing all vectors from memory\n");
//CheckI(v.vec,v.M,c);
gpuErrchk( cudaFreeHost( v.vec ) );
gpuErrchk( cudaFree( aC ) );
gpuErrchk(cudaEventRecord(stop1,0));
gpuErrchk(cudaEventSynchronize(stop1));
gpuErrchk(cudaEventElapsedTime(&time1, start1, stop1));
gpuErrchk(cudaEventDestroy(stop1));
gpuErrchk(cudaEventDestroy(start1));
printf("Parallel Time Taken: %3.1f ms \n",time);
printf("Full Time Taken: %6f seconds \n",time1/1000.0000);
return 0;
}
int CheckI(float * vv, unsigned long s, float *&c){
for (int i = 0; i <=s ; ++i)
{
vv[i]+=1;
if (vv[i]!=c[i])
{
printf("vv[%d]= %f, but c = %f\n",i,vv[i],c[i]);
return(0);
}
}
return (42);
}
|
62c87ec917913b524ac8c7d503d54dbe6455610d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cutil.h>
#include <gpuewrepot_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
extern "C"
void gpuewrepot_ (double*, int, double*, double, int, double, int, int, double*);
void gpuewrepot__ (double*, int, double*, double, int, double, int, int, double*);
extern "C"
//void ewrepotGold( float*, const double*, const double*, unsigned int, double);
//void ewrepotGold_d( double*, const double*, const double*, unsigned int, double);
void ewrepotGold_f( float*, const float*, unsigned int, float, float);
////////////////////////////////////////////////////////////////////////////////
//extern "C"
void
gpuewrepot_ (double* x, int n, double* q, double rscale, int tblno, double xmax, int periodicflag, int natchangeflag, double* force)
{
#ifndef CUDA_SDK_2
CUT_DEVICE_INIT();
#endif
// CUT_CHECK_DEVICE();
unsigned int size_A = ((n+THD-1)/THD*THD) * 4;
unsigned int mem_size_A = sizeof(float) * size_A;
float* x_float = (float*) malloc(mem_size_A);
//double stime,ltime;
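  /* The loop below packs position (x, y, z) and charge q as four consecutive
     floats per particle; entries beyond n (up to the THD-rounded size) are
     zero filled so the padded slots carry no position or charge. */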
for (int i = 0; i < size_A/4; i++){
if(i<n){
x_float[i*4] = (float)x[i*3];
x_float[i*4+1] = (float)x[i*3+1];
x_float[i*4+2] = (float)x[i*3+2];
x_float[i*4+3] = (float)q[i];
}
else{
x_float[i*4] = 0.0f;
x_float[i*4+1] = 0.0f;
x_float[i*4+2] = 0.0f;
x_float[i*4+3] = 0.0f;
}
}
float xmax_float = (float)xmax;
float alpha_float = (float)rscale;
float* d_A;
CUDA_SAFE_CALL(hipMalloc((void**) &d_A, mem_size_A));
CUDA_SAFE_CALL(hipMemcpy(d_A, x_float, mem_size_A,hipMemcpyHostToDevice) );
unsigned int size_C = ((n+THD-1)/THD*THD) * 3;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C));
float* f_float = (float*) malloc(mem_size_C);
//get_cputime(<ime,&stime);
dim3 threads(THD);
dim3 grid((n+THD-1) / THD);
hipLaunchKernelGGL(( ewrepot_kernel), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, n, xmax_float,alpha_float);
CUT_CHECK_ERROR("Kernel execution failed");
CUDA_SAFE_CALL(hipMemcpy(f_float, d_C, mem_size_C,hipMemcpyDeviceToHost) );
//get_cputime(<ime,&stime);
//printf("GPU Processing time: %10.3f (sec)\n", stime);
//float* reference = (float*) malloc(mem_size_C);
/*
for (int i = 0; i < n; ++i){
reference[i*3] = 0.e0;
reference[i*3+1] = 0.e0;
reference[i*3+2] = 0.e0;
}*/
//get_cputime(<ime,&stime);
//ewrepotGold_f(reference, x_float, n, xmax_float, alpha_float);
/*
unsigned int size_E = n * 3;
unsigned int mem_size_E = size_E * sizeof(double);
double* force_double = (double*) malloc(mem_size_E);
for (int i = 0; i < n; ++i){
force_double[i*3] = 0.e0;
force_double[i*3+1] = 0.e0;
force_double[i*3+2] =0.e0;
}
*/
//ewrepotGold_d(force_double, x, x, n, xmax);
//get_cputime(<ime,&stime);
//printf("HOST Processing time: %10.3f (sec)\n", stime);
//double sum_gpu = 0.e0;
//double sum_host = 0.e0;
for (int i = 0; i < n; ++i){
//sum_gpu += (double)h_C[i*3];
//sum_host += reference[i*3+1];
//printf("%16.6f %16.6f %d \n",f_float[i*3],reference[i*3],i);
force[i*3] += (double)f_float[i*3];
//force[i*3+1] += (double)f_float[i*3+1];
//force[i*3+2] += (double)f_float[i*3+2];
/*
force[i*3] += force_double[i*3];
force[i*3+1] += force_double[i*3+1];
force[i*3+2] += force_double[i*3+2];
*/
}
//printf("GPU : %20.8f \n",sum_gpu);
//printf("GPU : %20.8f \n",f_float[6]);
//printf("HOST: %20.8f \n",reference[6]);
free(x_float);
free(f_float);
//free(reference);
CUDA_SAFE_CALL(hipFree(d_A));
//CUDA_SAFE_CALL(hipFree(d_B));
CUDA_SAFE_CALL(hipFree(d_C));
}
/*
extern "C"
void
gpuvdwpot_ (double* x, int *n, int* atype, int *nat, double* gscale, double* rscale, int *tblno, double *xmax, int *periodicflag, int *natchangeflag, double* force)
{
gpuvdwpot_ (x,*n,atype,*nat,gscale,rscale,*tblno,*xmax,*periodicflag,*natchangeflag,force);
}
*/
extern "C"
void
gpuewrepot__ (double* x, int *n, double* q, double *rscale, int *tblno, double *xmax, int *periodicflag, int *natchangeflag, double* force)
{
gpuewrepot_ (x,*n,q,*rscale,*tblno,*xmax,*periodicflag,*natchangeflag,force);
}
/*
extern "C"
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (data1[k] != data2[k]) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f\n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf("\nTotal Errors = %d\n", error_count);
}
*/
/*
void
ewrepotGold(float* C, const double* A, const double* B, unsigned int num_a, double xmax)
{
double sum;
double dn2;
double dn6;
double dx;
double dy;
double dz;
double l2 = xmax * 0.5;
double exclude_radius2 = 0.1;
for (unsigned int i = 0; i < num_a; ++i){
sum = 0;
for (unsigned int j = 0; j < num_a; ++j) {
dx = (A[i*3] - A[j*3] );
dy = (A[i*3+1] - A[j*3+1]);
dz = (A[i*3+2] - A[j*3+2]);
if (!(dx < l2 && dx > -l2))
if (dx > l2){
dx = dx - xmax;
}else{
dx = dx + xmax;
}
if (!(dy < l2 && dy > -l2))
if (dy > l2){
dy = dy - xmax;
}else{
dy = dy + xmax;
}
if (!(dz < l2 && dz > -l2))
if (dz > l2){
dz = dz - xmax;
}else{
dz = dz + xmax;
}
dn2 = (dx * dx + dy * dy + dz * dz) * 1;
if (dn2 > exclude_radius2){
dn6 = 1.e0 / (dn2 * dn2 * dn2);
sum += 4 * dn6 * (dn6 - 1.e0);
}
}
C[i*3] = (float)sum;
}
}
void
ewrepotGold_d(double* C, const double* A, const double* B, unsigned int num_a, double xmax)
{
double sum;
double dn2;
double dn6;
double dx;
double dy;
double dz;
double l2 = xmax * 0.5;
double exclude_radius2 = 0.01e0;
double cutoff_radius2 = 9.e0;
for (unsigned int i = 0; i < num_a; ++i){
sum = 0.e0;
for (unsigned int j = 0; j < num_a; ++j) {
dx = (A[i*3] - A[j*3] );
dy = (A[i*3+1] - A[j*3+1]);
dz = (A[i*3+2] - A[j*3+2]);
if (!(dx < l2 && dx > -l2))
if (dx > l2){
dx = dx - xmax;
}else{
dx = dx + xmax;
}
if (!(dy < l2 && dy > -l2))
if (dy > l2){
dy = dy - xmax;
}else{
dy = dy + xmax;
}
if (!(dz < l2 && dz > -l2))
if (dz > l2){
dz = dz - xmax;
}else{
dz = dz + xmax;
}
dn2 = dx * dx + dy * dy + dz * dz;
if ((i != j) && dn2 < cutoff_radius2){
dn6 = 1.e0 / dn2 / dn2 / dn2 / dn2 / dn2 / dn2
- 1.e0 / dn2 / dn2 / dn2;
sum += dn6 * 4.e0;
}
dn2 = (dx * dx + dy * dy + dz * dz) * 1;
if (dn2 > exclude_radius2 && dn2 < cutoff_radius2){
dn6 = 1.e0 / (dn2 * dn2 * dn2);
sum += 4 * dn6 * (dn6 - 1.e0);
}
}
C[i*3] = sum;
}
}
*/
void
ewrepotGold_f(float* C, const float* A, unsigned int num_a, float xmax, float alpha)
{
float sum;
float dn2;
float dx;
float dy;
float dz;
float l2 = xmax * 0.5;
float sqdn;
//float exclude_radius2 = 0.01e0;
//float cutoff_radius2 = 9.e0;
for (unsigned int i = 0; i < num_a; ++i){
sum = 0.e0;
for (unsigned int j = 0; j < num_a; ++j) {
dx = (A[i*4] - A[j*4] );
dy = (A[i*4+1] - A[j*4+1]);
dz = (A[i*4+2] - A[j*4+2]);
if (!(dx < l2 && dx > -l2))
if (dx > l2){
dx = dx - xmax;
}else{
dx = dx + xmax;
}
if (!(dy < l2 && dy > -l2))
if (dy > l2){
dy = dy - xmax;
}else{
dy = dy + xmax;
}
if (!(dz < l2 && dz > -l2))
if (dz > l2){
dz = dz - xmax;
}else{
dz = dz + xmax;
}
dn2 = dx * dx + dy * dy + dz * dz;
//if ((i != j) && dn2 < cutoff_radius2){
if ((i != j)){
sqdn = sqrt(dn2) * alpha;
sum += erfc(sqdn) / sqdn * A[j*4+3];
}
}
C[i*3] = sum * A[i*4+3] * alpha;
}
}
| 62c87ec917913b524ac8c7d503d54dbe6455610d.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cutil.h>
#include <gpuewrepot_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
extern "C"
void gpuewrepot_ (double*, int, double*, double, int, double, int, int, double*);
void gpuewrepot__ (double*, int, double*, double, int, double, int, int, double*);
extern "C"
//void ewrepotGold( float*, const double*, const double*, unsigned int, double);
//void ewrepotGold_d( double*, const double*, const double*, unsigned int, double);
void ewrepotGold_f( float*, const float*, unsigned int, float, float);
////////////////////////////////////////////////////////////////////////////////
//extern "C"
void
gpuewrepot_ (double* x, int n, double* q, double rscale, int tblno, double xmax, int periodicflag, int natchangeflag, double* force)
{
#ifndef CUDA_SDK_2
CUT_DEVICE_INIT();
#endif
// CUT_CHECK_DEVICE();
unsigned int size_A = ((n+THD-1)/THD*THD) * 4;
unsigned int mem_size_A = sizeof(float) * size_A;
float* x_float = (float*) malloc(mem_size_A);
//double stime,ltime;
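  /* The loop below packs position (x, y, z) and charge q as four consecutive
     floats per particle; entries beyond n (up to the THD-rounded size) are
     zero filled so the padded slots carry no position or charge. */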
for (int i = 0; i < size_A/4; i++){
if(i<n){
x_float[i*4] = (float)x[i*3];
x_float[i*4+1] = (float)x[i*3+1];
x_float[i*4+2] = (float)x[i*3+2];
x_float[i*4+3] = (float)q[i];
}
else{
x_float[i*4] = 0.0f;
x_float[i*4+1] = 0.0f;
x_float[i*4+2] = 0.0f;
x_float[i*4+3] = 0.0f;
}
}
float xmax_float = (float)xmax;
float alpha_float = (float)rscale;
float* d_A;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_A, mem_size_A));
CUDA_SAFE_CALL(cudaMemcpy(d_A, x_float, mem_size_A,cudaMemcpyHostToDevice) );
unsigned int size_C = ((n+THD-1)/THD*THD) * 3;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C));
float* f_float = (float*) malloc(mem_size_C);
//get_cputime(<ime,&stime);
dim3 threads(THD);
dim3 grid((n+THD-1) / THD);
ewrepot_kernel<<< grid, threads >>>(d_C, d_A, n, xmax_float,alpha_float);
CUT_CHECK_ERROR("Kernel execution failed");
CUDA_SAFE_CALL(cudaMemcpy(f_float, d_C, mem_size_C,cudaMemcpyDeviceToHost) );
//get_cputime(<ime,&stime);
//printf("GPU Processing time: %10.3f (sec)\n", stime);
//float* reference = (float*) malloc(mem_size_C);
/*
for (int i = 0; i < n; ++i){
reference[i*3] = 0.e0;
reference[i*3+1] = 0.e0;
reference[i*3+2] = 0.e0;
}*/
//get_cputime(<ime,&stime);
//ewrepotGold_f(reference, x_float, n, xmax_float, alpha_float);
/*
unsigned int size_E = n * 3;
unsigned int mem_size_E = size_E * sizeof(double);
double* force_double = (double*) malloc(mem_size_E);
for (int i = 0; i < n; ++i){
force_double[i*3] = 0.e0;
force_double[i*3+1] = 0.e0;
force_double[i*3+2] =0.e0;
}
*/
//ewrepotGold_d(force_double, x, x, n, xmax);
//get_cputime(<ime,&stime);
//printf("HOST Processing time: %10.3f (sec)\n", stime);
//double sum_gpu = 0.e0;
//double sum_host = 0.e0;
for (int i = 0; i < n; ++i){
//sum_gpu += (double)h_C[i*3];
//sum_host += reference[i*3+1];
//printf("%16.6f %16.6f %d \n",f_float[i*3],reference[i*3],i);
force[i*3] += (double)f_float[i*3];
//force[i*3+1] += (double)f_float[i*3+1];
//force[i*3+2] += (double)f_float[i*3+2];
/*
force[i*3] += force_double[i*3];
force[i*3+1] += force_double[i*3+1];
force[i*3+2] += force_double[i*3+2];
*/
}
//printf("GPU : %20.8f \n",sum_gpu);
//printf("GPU : %20.8f \n",f_float[6]);
//printf("HOST: %20.8f \n",reference[6]);
free(x_float);
free(f_float);
//free(reference);
CUDA_SAFE_CALL(cudaFree(d_A));
//CUDA_SAFE_CALL(cudaFree(d_B));
CUDA_SAFE_CALL(cudaFree(d_C));
}
/*
extern "C"
void
gpuvdwpot_ (double* x, int *n, int* atype, int *nat, double* gscale, double* rscale, int *tblno, double *xmax, int *periodicflag, int *natchangeflag, double* force)
{
gpuvdwpot_ (x,*n,atype,*nat,gscale,rscale,*tblno,*xmax,*periodicflag,*natchangeflag,force);
}
*/
extern "C"
void
gpuewrepot__ (double* x, int *n, double* q, double *rscale, int *tblno, double *xmax, int *periodicflag, int *natchangeflag, double* force)
{
gpuewrepot_ (x,*n,q,*rscale,*tblno,*xmax,*periodicflag,*natchangeflag,force);
}
/*
extern "C"
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (data1[k] != data2[k]) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f\n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf("\nTotal Errors = %d\n", error_count);
}
*/
/*
void
ewrepotGold(float* C, const double* A, const double* B, unsigned int num_a, double xmax)
{
double sum;
double dn2;
double dn6;
double dx;
double dy;
double dz;
double l2 = xmax * 0.5;
double exclude_radius2 = 0.1;
for (unsigned int i = 0; i < num_a; ++i){
sum = 0;
for (unsigned int j = 0; j < num_a; ++j) {
dx = (A[i*3] - A[j*3] );
dy = (A[i*3+1] - A[j*3+1]);
dz = (A[i*3+2] - A[j*3+2]);
if (!(dx < l2 && dx > -l2))
if (dx > l2){
dx = dx - xmax;
}else{
dx = dx + xmax;
}
if (!(dy < l2 && dy > -l2))
if (dy > l2){
dy = dy - xmax;
}else{
dy = dy + xmax;
}
if (!(dz < l2 && dz > -l2))
if (dz > l2){
dz = dz - xmax;
}else{
dz = dz + xmax;
}
dn2 = (dx * dx + dy * dy + dz * dz) * 1;
if (dn2 > exclude_radius2){
dn6 = 1.e0 / (dn2 * dn2 * dn2);
sum += 4 * dn6 * (dn6 - 1.e0);
}
}
C[i*3] = (float)sum;
}
}
void
ewrepotGold_d(double* C, const double* A, const double* B, unsigned int num_a, double xmax)
{
double sum;
double dn2;
double dn6;
double dx;
double dy;
double dz;
double l2 = xmax * 0.5;
double exclude_radius2 = 0.01e0;
double cutoff_radius2 = 9.e0;
for (unsigned int i = 0; i < num_a; ++i){
sum = 0.e0;
for (unsigned int j = 0; j < num_a; ++j) {
dx = (A[i*3] - A[j*3] );
dy = (A[i*3+1] - A[j*3+1]);
dz = (A[i*3+2] - A[j*3+2]);
if (!(dx < l2 && dx > -l2))
if (dx > l2){
dx = dx - xmax;
}else{
dx = dx + xmax;
}
if (!(dy < l2 && dy > -l2))
if (dy > l2){
dy = dy - xmax;
}else{
dy = dy + xmax;
}
if (!(dz < l2 && dz > -l2))
if (dz > l2){
dz = dz - xmax;
}else{
dz = dz + xmax;
}
dn2 = dx * dx + dy * dy + dz * dz;
if ((i != j) && dn2 < cutoff_radius2){
dn6 = 1.e0 / dn2 / dn2 / dn2 / dn2 / dn2 / dn2
- 1.e0 / dn2 / dn2 / dn2;
sum += dn6 * 4.e0;
}
dn2 = (dx * dx + dy * dy + dz * dz) * 1;
if (dn2 > exclude_radius2 && dn2 < cutoff_radius2){
dn6 = 1.e0 / (dn2 * dn2 * dn2);
sum += 4 * dn6 * (dn6 - 1.e0);
}
}
C[i*3] = sum;
}
}
*/
void
ewrepotGold_f(float* C, const float* A, unsigned int num_a, float xmax, float alpha)
{
float sum;
float dn2;
float dx;
float dy;
float dz;
float l2 = xmax * 0.5;
float sqdn;
//float exclude_radius2 = 0.01e0;
//float cutoff_radius2 = 9.e0;
for (unsigned int i = 0; i < num_a; ++i){
sum = 0.e0;
for (unsigned int j = 0; j < num_a; ++j) {
dx = (A[i*4] - A[j*4] );
dy = (A[i*4+1] - A[j*4+1]);
dz = (A[i*4+2] - A[j*4+2]);
if (!(dx < l2 && dx > -l2))
if (dx > l2){
dx = dx - xmax;
}else{
dx = dx + xmax;
}
if (!(dy < l2 && dy > -l2))
if (dy > l2){
dy = dy - xmax;
}else{
dy = dy + xmax;
}
if (!(dz < l2 && dz > -l2))
if (dz > l2){
dz = dz - xmax;
}else{
dz = dz + xmax;
}
dn2 = dx * dx + dy * dy + dz * dz;
//if ((i != j) && dn2 < cutoff_radius2){
if ((i != j)){
sqdn = sqrt(dn2) * alpha;
sum += erfc(sqdn) / sqdn * A[j*4+3];
}
}
C[i*3] = sum * A[i*4+3] * alpha;
}
}
|
6d855932df7f75bff81d6922fae625a5f770a0be.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:NOT_ALL_VERIFIED
//--blockDim=512 --gridDim=64 --loop-unwind=2 --no-inline
//kernel.cu: error: possible write-write race on B
#include <hip/hip_runtime.h>
extern "C" {
__global__ void helloCUDA(float *A)
{
__shared__ float B[256];
for(int i = 0; i < 10; i ++) {
B[i] = A[i];
}
}
}
| 6d855932df7f75bff81d6922fae625a5f770a0be.cu | //xfail:NOT_ALL_VERIFIED
//--blockDim=512 --gridDim=64 --loop-unwind=2 --no-inline
//kernel.cu: error: possible write-write race on B
#include <cuda.h>
extern "C" {
__global__ void helloCUDA(float *A)
{
__shared__ float B[256];
for(int i = 0; i < 10; i ++) {
B[i] = A[i];
}
}
}
|
877ccd78215f377312e9a7d183b9e7a5baef57a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <algorithm>
#include <stdio.h>
#include "group_norm.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
template<typename T>
__global__ void group_norm_kernel(T* output,const T* input, size_t input_size,
int batch_size, int num_groups, int num_channels, int WH,
T eps,
T * mean, T * var, const T* weight,const T* bias){
CUDA_KERNEL_LOOP(i, input_size) {
const int mean_var_index = i/(num_channels*WH/num_groups);
const int axpy_index = (i%(num_channels*WH))/WH;
T ret = (input[i]- mean[mean_var_index])/sqrt(var[mean_var_index]+eps);
ret = ret*weight[axpy_index] + bias[axpy_index];
output[i] = ret;
}
}
template<typename T>
void compute_group_norm(T* output, const T* input,
int batch_size, int num_groups, int num_channels, int WH,
T eps,
const T* weight,const T* bias, hipStream_t stream, void* workspace){
T* mean = (T*)workspace;
T* var = mean + batch_size*num_groups;
int mean_var_shape[2] = {batch_size*num_groups, num_channels*WH/num_groups};
bool mean_var_reduce_dims[2] = {false,true};
amirstan::cuda::tensorMeanVar<T>(mean,var, input,
&mean_var_shape[0], &mean_var_reduce_dims[0] , 2,
stream, (void*)(var+batch_size*num_groups));
size_t input_size = batch_size * num_channels * WH;
hipLaunchKernelGGL(( group_norm_kernel<T>), dim3(GET_BLOCKS(input_size)), dim3(CUDA_NUM_THREADS),0,stream, output, input, input_size,
batch_size, num_groups, num_channels, WH,
eps,
mean, var, weight, bias);
}
template void compute_group_norm<float>(float* output, const float* input,
int batch_size, int num_groups, int num_channels, int WH,
float eps,
const float* weight,const float* bias, hipStream_t stream, void* workspace);
}
} | 877ccd78215f377312e9a7d183b9e7a5baef57a1.cu | #include <cmath>
#include <algorithm>
#include <stdio.h>
#include "group_norm.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
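/* In the kernel below, mean_var_index selects one of the batch_size*num_groups
   mean/variance statistics (each covering num_channels*WH/num_groups elements),
   while axpy_index is the channel index used to pick the per-channel weight and
   bias for the final scale-and-shift. */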
template<typename T>
__global__ void group_norm_kernel(T* output,const T* input, size_t input_size,
int batch_size, int num_groups, int num_channels, int WH,
T eps,
T * mean, T * var, const T* weight,const T* bias){
CUDA_KERNEL_LOOP(i, input_size) {
const int mean_var_index = i/(num_channels*WH/num_groups);
const int axpy_index = (i%(num_channels*WH))/WH;
T ret = (input[i]- mean[mean_var_index])/sqrt(var[mean_var_index]+eps);
ret = ret*weight[axpy_index] + bias[axpy_index];
output[i] = ret;
}
}
template<typename T>
void compute_group_norm(T* output, const T* input,
int batch_size, int num_groups, int num_channels, int WH,
T eps,
const T* weight,const T* bias, cudaStream_t stream, void* workspace){
T* mean = (T*)workspace;
T* var = mean + batch_size*num_groups;
int mean_var_shape[2] = {batch_size*num_groups, num_channels*WH/num_groups};
bool mean_var_reduce_dims[2] = {false,true};
amirstan::cuda::tensorMeanVar<T>(mean,var, input,
&mean_var_shape[0], &mean_var_reduce_dims[0] , 2,
stream, (void*)(var+batch_size*num_groups));
size_t input_size = batch_size * num_channels * WH;
group_norm_kernel<T><<<GET_BLOCKS(input_size), CUDA_NUM_THREADS,0,stream>>>(output, input, input_size,
batch_size, num_groups, num_channels, WH,
eps,
mean, var, weight, bias);
}
template void compute_group_norm<float>(float* output, const float* input,
int batch_size, int num_groups, int num_channels, int WH,
float eps,
const float* weight,const float* bias, cudaStream_t stream, void* workspace);
}
} |
9ce37c50832ef3b85f66f42165c341e4c8e6d6d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* CPU reference reduction using Kahan (compensated) summation to limit floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
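/* kernel3: each block reduces 2*blockDim.x consecutive input elements. The
   first addition is done while loading from global into shared memory, then a
   tree reduction runs in shared memory and thread 0 writes one partial sum per
   block to g_odata. The host relaunches the kernel on its own output until a
   single value remains. */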
__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * 2 * blockDim.x + threadIdx.x;
if(i < n){
scratch[threadIdx.x] = g_idata[i] + g_idata[i+blockDim.x];
} else{
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x/2; s > 0; s = s >> 1) {
if(threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0){
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_3, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 3;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
hipDeviceSynchronize ();
t_kernel_3 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 9ce37c50832ef3b85f66f42165c341e4c8e6d6d0.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* CPU reference reduction using Kahan (compensated) summation to limit floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
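/* kernel3: each block reduces 2*blockDim.x consecutive input elements. The
   first addition is done while loading from global into shared memory, then a
   tree reduction runs in shared memory and thread 0 writes one partial sum per
   block to g_odata. The host relaunches the kernel on its own output until a
   single value remains. */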
__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * 2 * blockDim.x + threadIdx.x;
if(i < n){
scratch[threadIdx.x] = g_idata[i] + g_idata[i+blockDim.x];
} else{
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x/2; s > 0; s = s >> 1) {
if(threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0){
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_3, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 3;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
kernel3 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
cudaThreadSynchronize ();
t_kernel_3 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(fabs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
d8828c24644c7276abe2bca651b2a105374aba73.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/cgemv2.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ahmad Abdelfattah
* @date 2017-11-13
**/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gemv2_core.cuh"
#if(SM >= 30)
#define cgemvn_nb (32)
#define cgemvn_ntcol (4)
#define cgemvn_ept (2)
#define cgemvn_width (cgemvn_ntcol*cgemvn_ept)
#define cgemvn_by (16)
#define cgemvt_nb (32)
#define cgemvt_ntcol (2)
#define cgemvt_ept (4)
#define cgemvt_width (cgemvt_ntcol*cgemvt_ept)
#define cgemvt_by (8)
#else
#define cgemvn_nb (64)
#define cgemvn_ntcol (8)
#define cgemvn_ept (2)
#define cgemvn_width (cgemvn_ntcol*cgemvn_ept)
#define cgemvn_by (1)
#define cgemvt_nb (64)
#define cgemvt_ntcol (8)
#define cgemvt_ept (2)
#define cgemvt_width (cgemvt_ntcol*cgemvt_ept)
#define cgemvt_by (1)
#endif
extern "C"
int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, hipStream_t stream);
int kblas_cgemv2_driver( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
hipStream_t stream)
{
if(trans == 'n' || trans == 'N')
{
// scaling with beta
kblas_cscal_async(rows, beta, dY, incy, stream);
int mod_r = rows % cgemvn_nb;
int mod_c = cols % cgemvn_width;
int blocks = rows/cgemvn_nb;
if(mod_r != 0) blocks += 1;
const int thread_x = cgemvn_nb;
const int thread_y = cgemvn_ntcol;
const int ept = cgemvn_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
switch(ept_)
{
case 0:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 1:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 2:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 3:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 4:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 5:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 6:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 7:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 8:hipLaunchKernelGGL(( gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
default: printf("irregular part %d is not supported, please extend the case statement of cgemv\n", ept_); exit(1);
}
} // end of non-transpose case
else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
// scaling with beta
kblas_cscal_async(cols, beta, dY, incy, stream);
int mod_r = rows % cgemvt_nb;
int mod_c = cols % cgemvt_width;
int blocks = cols/cgemvt_width;
if(mod_c != 0) blocks += 1;
const int thread_x = cgemvt_nb;
const int thread_y = cgemvt_ntcol;
const int ept = cgemvt_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
int conj;
if(trans == 'c' || trans == 'C')conj = 1;
else conj = 0;
//printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_);
switch(ept_)
{
case 0:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 1:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 2:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 3:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 4:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 5:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 6:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 7:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 8:hipLaunchKernelGGL(( gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
default: printf("irregular part %d is not supported, please extend the case statement of cgemv\n", ept_); exit(1);
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
extern "C"
int kblas_cgemv2(char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy)
{
return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0);
}
extern "C"
int kblas_cgemv2_async( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
hipStream_t stream)
{
return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream);
}
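/* Illustrative usage sketch (hypothetical buffers, not part of KBLAS itself):
   assuming dA (lda x cols), dX and dY are already allocated on the device,
   a plain non-transposed y = alpha*A*x + beta*y call would look like

       cuFloatComplex alpha = make_cuFloatComplex(1.0f, 0.0f);
       cuFloatComplex beta  = make_cuFloatComplex(0.0f, 0.0f);
       kblas_cgemv2('N', rows, cols, alpha, dA, lda, dX, 1, beta, dY, 1);

   The _async variant takes the same arguments plus a hipStream_t. */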
| d8828c24644c7276abe2bca651b2a105374aba73.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/cgemv2.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ahmad Abdelfattah
* @date 2017-11-13
**/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "gemv2_core.cuh"
#if(SM >= 30)
#define cgemvn_nb (32)
#define cgemvn_ntcol (4)
#define cgemvn_ept (2)
#define cgemvn_width (cgemvn_ntcol*cgemvn_ept)
#define cgemvn_by (16)
#define cgemvt_nb (32)
#define cgemvt_ntcol (2)
#define cgemvt_ept (4)
#define cgemvt_width (cgemvt_ntcol*cgemvt_ept)
#define cgemvt_by (8)
#else
#define cgemvn_nb (64)
#define cgemvn_ntcol (8)
#define cgemvn_ept (2)
#define cgemvn_width (cgemvn_ntcol*cgemvn_ept)
#define cgemvn_by (1)
#define cgemvt_nb (64)
#define cgemvt_ntcol (8)
#define cgemvt_ept (2)
#define cgemvt_width (cgemvt_ntcol*cgemvt_ept)
#define cgemvt_by (1)
#endif
extern "C"
int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, cudaStream_t stream);
int kblas_cgemv2_driver( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
cudaStream_t stream)
{
if(trans == 'n' || trans == 'N')
{
// scaling with beta
kblas_cscal_async(rows, beta, dY, incy, stream);
int mod_r = rows % cgemvn_nb;
int mod_c = cols % cgemvn_width;
int blocks = rows/cgemvn_nb;
if(mod_r != 0) blocks += 1;
const int thread_x = cgemvn_nb;
const int thread_y = cgemvn_ntcol;
const int ept = cgemvn_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
switch(ept_)
{
case 0: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 1: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 2: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 3: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 4: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 5: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 6: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 7: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
case 8: gemvn<cuFloatComplex, cgemvn_nb, cgemvn_ntcol, ept, cgemvn_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
default: printf("irregular part %d is not supported, please extend the case statement of cgemv\n", ept_); exit(1);
}
} // end of non-transpose case
else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
// scaling with beta
kblas_cscal_async(cols, beta, dY, incy, stream);
int mod_r = rows % cgemvt_nb;
int mod_c = cols % cgemvt_width;
int blocks = cols/cgemvt_width;
if(mod_c != 0) blocks += 1;
const int thread_x = cgemvt_nb;
const int thread_y = cgemvt_ntcol;
const int ept = cgemvt_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
int conj;
if(trans == 'c' || trans == 'C')conj = 1;
else conj = 0;
//printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_);
switch(ept_)
{
case 0: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 1: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 2: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 3: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 4: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 5: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 6: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 7: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
case 8: gemvt<cuFloatComplex, cgemvt_nb, cgemvt_ntcol, ept, cgemvt_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
default: printf("irregular part %d is not supported, please extend the case statement of cgemv\n", ept_); exit(1);
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
extern "C"
int kblas_cgemv2(char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy)
{
return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0);
}
extern "C"
int kblas_cgemv2_async( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
cudaStream_t stream)
{
return kblas_cgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream);
}
|
c108e5055f07e0e863a120ea94b5383b551bf945.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifdef WITH_CUDA
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/eye_kernel_util.h"
namespace oneflow {
namespace user_op {
template<typename T>
__global__ void EyeForwardGpuKernel(const int64_t cols, const int64_t rows, T* out) {
SetOneInDiag(cols, rows, out);
}
template<typename T>
struct EyeFunctor<DeviceType::kCUDA, T> final {
void operator()(ep::Stream* stream, const int64_t& cols, const int64_t& rows, T* out) {
RUN_CUDA_KERNEL((EyeForwardGpuKernel<T>), stream, rows, cols, rows, out);
}
};
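// Illustrative note: SetOneInDiag comes from eye_kernel_util.h (not shown
// here); judging from the functor's name and arguments it presumably writes
// one along the leading diagonal of the rows x cols output, i.e. builds an
// identity-like ("eye") matrix on the device.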
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_EYE_FUNCTOR, (DeviceType::kCUDA), EYE_DATA_TYPE_SEQ);
} // namespace user_op
} // namespace oneflow
#endif // End WITH_CUDA
| c108e5055f07e0e863a120ea94b5383b551bf945.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifdef WITH_CUDA
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/eye_kernel_util.h"
namespace oneflow {
namespace user_op {
template<typename T>
__global__ void EyeForwardGpuKernel(const int64_t cols, const int64_t rows, T* out) {
SetOneInDiag(cols, rows, out);
}
template<typename T>
struct EyeFunctor<DeviceType::kCUDA, T> final {
void operator()(ep::Stream* stream, const int64_t& cols, const int64_t& rows, T* out) {
RUN_CUDA_KERNEL((EyeForwardGpuKernel<T>), stream, rows, cols, rows, out);
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_EYE_FUNCTOR, (DeviceType::kCUDA), EYE_DATA_TYPE_SEQ);
} // namespace user_op
} // namespace oneflow
#endif // End WITH_CUDA
|
227456d4704daf49be54f42e10d63872c4971ac4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#define vect_len 33
using namespace std;
const int blocksize = 32;
// __global__ decorator signifies a kernel that can be called from the host
__global__ void vec_con_1(int *a, int *b, int n)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < vect_len)
for (int j = 0; j < n; j++)
{
if (id < vect_len / 3)
continue;
else
{
if (id < vect_len / 3 * 2)
a[id] +=10;
else
{
b[id] += 4;
if (b[id] == 200)
break;
}
a[id] += 1;
}
b[id] += 1;
}
}
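// Illustrative note: this kernel deliberately mixes `continue` and `break`
// inside a loop so that threads of the same warp take divergent paths; the
// host loop further down replays the same per-element arithmetic on the CPU
// so the result can be verified element by element.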
int main(){
const int vect_size = vect_len*sizeof(int);
int * vect1=(int*)malloc(vect_size);
int * vect2=(int*)malloc(vect_size);
int * result_v1=(int*)malloc(vect_size);
int * result_v2=(int*)malloc(vect_size);
bool flag;
for(int i = 0; i < vect_len; i++)
{
vect1[i] = i;
vect2[i] = 2 * i;
}
int *ad, *bd;
// allocate device memory
hipMalloc( (void**)&ad, vect_size );
hipMalloc( (void**)&bd, vect_size );
// copy data to device
hipMemcpy( ad, vect1, vect_size, hipMemcpyHostToDevice );
hipMemcpy( bd, vect2, vect_size, hipMemcpyHostToDevice );
// setup block and grid size
dim3 dimBlock( blocksize, 1, 1);
dim3 dimGrid((vect_len + blocksize - 1)/blocksize, 1 , 1);
// call device kernel
hipLaunchKernelGGL(( vec_con_1), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, 10);
hipMemcpy( result_v1, ad, vect_size, hipMemcpyDeviceToHost );
hipMemcpy( result_v2, bd, vect_size, hipMemcpyDeviceToHost );
flag = true;
for (int id = 0; id < vect_len; id++)
{
int a = id;
int b = 2 * id;
for (int j = 0; j < 10; j++)
{
if (id < vect_len / 3)
continue;
else
{
if (id < vect_len /3 * 2)
a +=10;
else
{
b += 4;
if (b == 200)
break;
}
a += 1;
}
b += 1;
}
if (a != result_v1[id])
{
cout << "Test 1 Error at a " << id << " expecting "
<< a << " getting " << result_v1[id] <<endl;
flag = false;
}
if (b != result_v2[id])
{
cout << "Test 1 Error at b " << id << " expecting "
<< b << " getting " << result_v2[id] <<endl;
flag = false;
}
}
if(flag)
cout << "Verification test passes." <<endl;
// free device memory
hipFree( ad );
hipFree( bd );
free(vect1);
free(vect2);
free(result_v1);
free(result_v2);
return EXIT_SUCCESS;
}
| 227456d4704daf49be54f42e10d63872c4971ac4.cu | #include <iostream>
#include <cuda.h>
#define vect_len 33
using namespace std;
const int blocksize = 32;
// __global__ decorator signifies a kernel that can be called from the host
__global__ void vec_con_1(int *a, int *b, int n)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < vect_len)
for (int j = 0; j < n; j++)
{
if (id < vect_len / 3)
continue;
else
{
if (id < vect_len / 3 * 2)
a[id] +=10;
else
{
b[id] += 4;
if (b[id] == 200)
break;
}
a[id] += 1;
}
b[id] += 1;
}
}
int main(){
const int vect_size = vect_len*sizeof(int);
int * vect1=(int*)malloc(vect_size);
int * vect2=(int*)malloc(vect_size);
int * result_v1=(int*)malloc(vect_size);
int * result_v2=(int*)malloc(vect_size);
bool flag;
for(int i = 0; i < vect_len; i++)
{
vect1[i] = i;
vect2[i] = 2 * i;
}
int *ad, *bd;
// allocate device memory
cudaMalloc( (void**)&ad, vect_size );
cudaMalloc( (void**)&bd, vect_size );
// copy data to device
cudaMemcpy( ad, vect1, vect_size, cudaMemcpyHostToDevice );
cudaMemcpy( bd, vect2, vect_size, cudaMemcpyHostToDevice );
// setup block and grid size
dim3 dimBlock( blocksize, 1, 1);
dim3 dimGrid((vect_len + blocksize - 1)/blocksize, 1 , 1);
// call device kernel
vec_con_1<<<dimGrid, dimBlock>>>(ad, bd, 10);
cudaMemcpy( result_v1, ad, vect_size, cudaMemcpyDeviceToHost );
cudaMemcpy( result_v2, bd, vect_size, cudaMemcpyDeviceToHost );
flag = true;
for (int id = 0; id < vect_len; id++)
{
int a = id;
int b = 2 * id;
for (int j = 0; j < 10; j++)
{
if (id < vect_len / 3)
continue;
else
{
if (id < vect_len /3 * 2)
a +=10;
else
{
b += 4;
if (b == 200)
break;
}
a += 1;
}
b += 1;
}
if (a != result_v1[id])
{
cout << "Test 1 Error at a " << id << " expecting "
<< a << " getting " << result_v1[id] <<endl;
flag = false;
}
if (b != result_v2[id])
{
cout << "Test 1 Error at b " << id << " expecting "
<< b << " getting " << result_v2[id] <<endl;
flag = false;
}
}
if(flag)
cout << "Verification test passes." <<endl;
// free device memory
cudaFree( ad );
cudaFree( bd );
free(vect1);
free(vect2);
free(result_v1);
free(result_v2);
return EXIT_SUCCESS;
}
|
dd44c2ab4a2eead3ddd53054befca520e9d07817.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
// modify from
// https://github.com/NVIDIA/TensorRT/tree/master/plugin/batchedNMSPlugin
#include <vector>
#include "hipcub/hipcub.hpp"
#include "nms/cub_helper.h"
#include "nms/kernel.h"
#include "trt_plugin_helper.hpp"
template <typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta) __global__
void prepareSortData(const int num, const int num_classes, const int num_preds_per_class,
const int background_label_id, const float confidence_threshold,
T_SCORE *conf_scores_gpu, T_SCORE *temp_scores, int *temp_idx,
int *d_offsets) {
// Prepare scores data for sort
const int cur_idx = blockIdx.x * nthds_per_cta + threadIdx.x;
const int numPredsPerBatch = num_classes * num_preds_per_class;
if (cur_idx < numPredsPerBatch) {
const int class_idx = cur_idx / num_preds_per_class;
for (int i = 0; i < num; i++) {
const int targetIdx = i * numPredsPerBatch + cur_idx;
const T_SCORE score = conf_scores_gpu[targetIdx];
// "Clear" background labeled score and index
// Because we do not care about background
if (class_idx == background_label_id) {
// Set scores to 0
// Set label = -1
temp_scores[targetIdx] = 0.0f;
temp_idx[targetIdx] = -1;
conf_scores_gpu[targetIdx] = 0.0f;
}
// "Clear" scores lower than threshold
else {
if (score > confidence_threshold) {
temp_scores[targetIdx] = score;
temp_idx[targetIdx] = cur_idx + i * numPredsPerBatch;
} else {
// Set scores to 0
// Set label = -1
temp_scores[targetIdx] = 0.0f;
temp_idx[targetIdx] = -1;
conf_scores_gpu[targetIdx] = 0.0f;
// TODO: HERE writing memory too many times
}
}
if ((cur_idx % num_preds_per_class) == 0) {
const int offset_ct = i * num_classes + cur_idx / num_preds_per_class;
d_offsets[offset_ct] = offset_ct * num_preds_per_class;
// set the last element in d_offset
if (blockIdx.x == 0 && threadIdx.x == 0)
d_offsets[num * num_classes] = num * numPredsPerBatch;
}
}
}
}
template <typename T_SCORE>
pluginStatus_t sortScoresPerClass_gpu(hipStream_t stream, const int num, const int num_classes,
const int num_preds_per_class, const int background_label_id,
const float confidence_threshold, void *conf_scores_gpu,
void *index_array_gpu, void *workspace) {
const int num_segments = num * num_classes;
void *temp_scores = workspace;
const int arrayLen = num * num_classes * num_preds_per_class;
void *temp_idx = nextWorkspacePtr((int8_t *)temp_scores, arrayLen * sizeof(T_SCORE));
void *d_offsets = nextWorkspacePtr((int8_t *)temp_idx, arrayLen * sizeof(int));
size_t cubOffsetSize = (num_segments + 1) * sizeof(int);
void *cubWorkspace = nextWorkspacePtr((int8_t *)d_offsets, cubOffsetSize);
const int BS = 512;
const int GS = (num_classes * num_preds_per_class + BS - 1) / BS;
hipLaunchKernelGGL(( prepareSortData<T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream,
num, num_classes, num_preds_per_class, background_label_id, confidence_threshold,
(T_SCORE *)conf_scores_gpu, (T_SCORE *)temp_scores, (int *)temp_idx, (int *)d_offsets);
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_segments);
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes, (const T_SCORE *)(temp_scores),
(T_SCORE *)(conf_scores_gpu), (const int *)(temp_idx), (int *)(index_array_gpu), arrayLen,
num_segments, (const int *)d_offsets, (const int *)d_offsets + 1, 0, sizeof(T_SCORE) * 8,
stream);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
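// Illustrative note on the workspace layout used above (derived from the
// pointer arithmetic, not an official description): the caller-provided
// buffer is carved into [temp_scores: arrayLen*sizeof(T_SCORE)]
// [temp_idx: arrayLen*sizeof(int)] [d_offsets: (num*num_classes+1)*sizeof(int)]
// [CUB segmented-sort scratch], matching sortScoresPerClassWorkspaceSize().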
// sortScoresPerClass LAUNCH CONFIG
typedef pluginStatus_t (*sspcFunc)(hipStream_t, const int, const int, const int, const int,
const float, void *, void *, void *);
struct sspcLaunchConfig {
DataType t_score;
sspcFunc function;
sspcLaunchConfig(DataType t_score) : t_score(t_score) {}
sspcLaunchConfig(DataType t_score, sspcFunc function) : t_score(t_score), function(function) {}
bool operator==(const sspcLaunchConfig &other) { return t_score == other.t_score; }
};
static std::vector<sspcLaunchConfig> sspcFuncVec;
bool sspcInit() {
sspcFuncVec.push_back(sspcLaunchConfig(DataType::kFLOAT, sortScoresPerClass_gpu<float>));
return true;
}
static bool initialized = sspcInit();
pluginStatus_t sortScoresPerClass(hipStream_t stream, const int num, const int num_classes,
const int num_preds_per_class, const int background_label_id,
const float confidence_threshold, const DataType DT_SCORE,
void *conf_scores_gpu, void *index_array_gpu, void *workspace) {
sspcLaunchConfig lc = sspcLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspcFuncVec.size(); ++i) {
if (lc == sspcFuncVec[i]) {
DEBUG_PRINTF("sortScoresPerClass kernel %d\n", i);
return sspcFuncVec[i].function(stream, num, num_classes, num_preds_per_class,
background_label_id, confidence_threshold, conf_scores_gpu,
index_array_gpu, workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerClassWorkspaceSize(const int num, const int num_classes,
const int num_preds_per_class, const DataType DT_CONF) {
size_t wss[4];
const int arrayLen = num * num_classes * num_preds_per_class;
wss[0] = arrayLen * mmdeploy::getElementSize(DT_CONF); // temp scores
wss[1] = arrayLen * sizeof(int); // temp indices
wss[2] = (num * num_classes + 1) * sizeof(int); // offsets
if (DT_CONF == DataType::kFLOAT) {
wss[3] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num * num_classes); // cub workspace
} else {
printf("SCORE type not supported\n");
return (size_t)-1;
}
return calculateTotalWorkspaceSize(wss, 4);
}
| dd44c2ab4a2eead3ddd53054befca520e9d07817.cu | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
// modify from
// https://github.com/NVIDIA/TensorRT/tree/master/plugin/batchedNMSPlugin
#include <vector>
#include "cub/cub.cuh"
#include "nms/cub_helper.h"
#include "nms/kernel.h"
#include "trt_plugin_helper.hpp"
template <typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta) __global__
void prepareSortData(const int num, const int num_classes, const int num_preds_per_class,
const int background_label_id, const float confidence_threshold,
T_SCORE *conf_scores_gpu, T_SCORE *temp_scores, int *temp_idx,
int *d_offsets) {
// Prepare scores data for sort
const int cur_idx = blockIdx.x * nthds_per_cta + threadIdx.x;
const int numPredsPerBatch = num_classes * num_preds_per_class;
if (cur_idx < numPredsPerBatch) {
const int class_idx = cur_idx / num_preds_per_class;
for (int i = 0; i < num; i++) {
const int targetIdx = i * numPredsPerBatch + cur_idx;
const T_SCORE score = conf_scores_gpu[targetIdx];
// "Clear" background labeled score and index
// Because we do not care about background
if (class_idx == background_label_id) {
// Set scores to 0
// Set label = -1
temp_scores[targetIdx] = 0.0f;
temp_idx[targetIdx] = -1;
conf_scores_gpu[targetIdx] = 0.0f;
}
// "Clear" scores lower than threshold
else {
if (score > confidence_threshold) {
temp_scores[targetIdx] = score;
temp_idx[targetIdx] = cur_idx + i * numPredsPerBatch;
} else {
// Set scores to 0
// Set label = -1
temp_scores[targetIdx] = 0.0f;
temp_idx[targetIdx] = -1;
conf_scores_gpu[targetIdx] = 0.0f;
// TODO: HERE writing memory too many times
}
}
if ((cur_idx % num_preds_per_class) == 0) {
const int offset_ct = i * num_classes + cur_idx / num_preds_per_class;
d_offsets[offset_ct] = offset_ct * num_preds_per_class;
// set the last element in d_offset
if (blockIdx.x == 0 && threadIdx.x == 0)
d_offsets[num * num_classes] = num * numPredsPerBatch;
}
}
}
}
template <typename T_SCORE>
pluginStatus_t sortScoresPerClass_gpu(cudaStream_t stream, const int num, const int num_classes,
const int num_preds_per_class, const int background_label_id,
const float confidence_threshold, void *conf_scores_gpu,
void *index_array_gpu, void *workspace) {
const int num_segments = num * num_classes;
void *temp_scores = workspace;
const int arrayLen = num * num_classes * num_preds_per_class;
void *temp_idx = nextWorkspacePtr((int8_t *)temp_scores, arrayLen * sizeof(T_SCORE));
void *d_offsets = nextWorkspacePtr((int8_t *)temp_idx, arrayLen * sizeof(int));
size_t cubOffsetSize = (num_segments + 1) * sizeof(int);
void *cubWorkspace = nextWorkspacePtr((int8_t *)d_offsets, cubOffsetSize);
const int BS = 512;
const int GS = (num_classes * num_preds_per_class + BS - 1) / BS;
prepareSortData<T_SCORE, BS><<<GS, BS, 0, stream>>>(
num, num_classes, num_preds_per_class, background_label_id, confidence_threshold,
(T_SCORE *)conf_scores_gpu, (T_SCORE *)temp_scores, (int *)temp_idx, (int *)d_offsets);
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_segments);
cub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes, (const T_SCORE *)(temp_scores),
(T_SCORE *)(conf_scores_gpu), (const int *)(temp_idx), (int *)(index_array_gpu), arrayLen,
num_segments, (const int *)d_offsets, (const int *)d_offsets + 1, 0, sizeof(T_SCORE) * 8,
stream);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerClass LAUNCH CONFIG
typedef pluginStatus_t (*sspcFunc)(cudaStream_t, const int, const int, const int, const int,
const float, void *, void *, void *);
struct sspcLaunchConfig {
DataType t_score;
sspcFunc function;
sspcLaunchConfig(DataType t_score) : t_score(t_score) {}
sspcLaunchConfig(DataType t_score, sspcFunc function) : t_score(t_score), function(function) {}
bool operator==(const sspcLaunchConfig &other) { return t_score == other.t_score; }
};
static std::vector<sspcLaunchConfig> sspcFuncVec;
bool sspcInit() {
sspcFuncVec.push_back(sspcLaunchConfig(DataType::kFLOAT, sortScoresPerClass_gpu<float>));
return true;
}
static bool initialized = sspcInit();
pluginStatus_t sortScoresPerClass(cudaStream_t stream, const int num, const int num_classes,
const int num_preds_per_class, const int background_label_id,
const float confidence_threshold, const DataType DT_SCORE,
void *conf_scores_gpu, void *index_array_gpu, void *workspace) {
sspcLaunchConfig lc = sspcLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspcFuncVec.size(); ++i) {
if (lc == sspcFuncVec[i]) {
DEBUG_PRINTF("sortScoresPerClass kernel %d\n", i);
return sspcFuncVec[i].function(stream, num, num_classes, num_preds_per_class,
background_label_id, confidence_threshold, conf_scores_gpu,
index_array_gpu, workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerClassWorkspaceSize(const int num, const int num_classes,
const int num_preds_per_class, const DataType DT_CONF) {
size_t wss[4];
const int arrayLen = num * num_classes * num_preds_per_class;
wss[0] = arrayLen * mmdeploy::getElementSize(DT_CONF); // temp scores
wss[1] = arrayLen * sizeof(int); // temp indices
wss[2] = (num * num_classes + 1) * sizeof(int); // offsets
if (DT_CONF == DataType::kFLOAT) {
wss[3] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num * num_classes); // cub workspace
} else {
printf("SCORE type not supported\n");
return (size_t)-1;
}
return calculateTotalWorkspaceSize(wss, 4);
}
|
05dc7b00435811b7daeb00042d5469792f833ca7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//__global__ void mykernel(void) {i
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
int main (void) {
int a, b, c;
int *d_a, *d_b, *d_c;
int size=sizeof(int);
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
a=2;
b=7;
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("c=%d\n", c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//mykernel<<<1,1>>>();
//printf("Hello world\n");
return 0;
}
| 05dc7b00435811b7daeb00042d5469792f833ca7.cu | #include <stdio.h>
//__global__ void mykernel(void) {i
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
int main (void) {
int a, b, c;
int *d_a, *d_b, *d_c;
int size=sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
a=2;
b=7;
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
add<<<1,1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("c=%d\n", c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//mykernel<<<1,1>>>();
//printf("Hello world\n");
return 0;
}
|
18031f917ad2f65c0861b7ea2ece12d479349177.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
* an output PGM image (image_filename_out). This CUDA kernel performs
* a simple 2D transform (rotation) on the texture coordinates (u,v).
*/
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <hip/hip_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
// Define the files that are to be save and the reference images for validation
const char *imageFilename = "/home/simbarashe/Downloads/assignment3/assignement2Helper/data/lena_bw.pgm";
const char *refFilename = "/home/simbarashe/Downloads/assignment3/assignement2Helper/data/ref_rotated.pgm";
const char *sampleName = "/home/simbarashe/Downloads/assignment3/assignement2Helper/simpleTexture";
////////////////////////////////////////////////////////////////////////////////
// Constants
const float angle = 0.5f; // angle to rotate image by (in radians)
// Texture reference for 2D float texture
texture<float, 2, hipReadModeElementType> tex;
// Auto-Verification Code
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void transformKernel(float *outputData,
int width,
int height,
float theta)
{
// calculate normalized texture coordinates
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = (float)x - (float)width/2;
float v = (float)y - (float)height/2;
float tu = u*cosf(theta) - v*sinf(theta);
float tv = v*cosf(theta) + u*sinf(theta);
tu /= (float)width;
tv /= (float)height;
// read from texture and write to global memory
outputData[y*width + x] = tex2D(tex, tu+0.5f, tv+0.5f);
}
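// Illustrative note: the kernel rotates texture coordinates rather than
// pixels. (u,v) are the pixel coordinates re-centred on the image centre,
// (tu,tv) applies the 2-D rotation matrix [cos -sin; sin cos] to (u,v), and
// dividing by width/height plus the +0.5f offset maps back into normalized
// [0,1] texture space, where the wrap address mode and linear filtering
// handle out-of-range and fractional lookups.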
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", sampleName);
// Process command-line arguments
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **) argv, "input"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"input",
(char **) &imageFilename);
if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"reference",
(char **) &refFilename);
}
else
{
printf("-input flag should be used with -reference flag");
exit(EXIT_FAILURE);
}
}
else if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
printf("-reference flag should be used with -input flag");
exit(EXIT_FAILURE);
}
}
runTest(argc, argv);
hipDeviceReset();
printf("%s completed, returned %s\n",
sampleName,
testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
int devID = findCudaDevice(argc, (const char **) argv);
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
//Load reference image from image (output)
float *hDataRef = (float *) malloc(size);
char *refPath = sdkFindFilePath(refFilename, argv[0]);
if (refPath == NULL)
{
printf("Unable to find reference image file: %s\n", refFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(refPath, &hDataRef, &width, &height);
// Allocate device memory for result
float *dData = NULL;
checkCudaErrors(hipMalloc((void **) &dData, size));
// Allocate array and copy image data
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *cuArray;
checkCudaErrors(hipMallocArray(&cuArray,&channelDesc,width,height));
checkCudaErrors(hipMemcpyToArray(cuArray,0, 0, hData,size,hipMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
checkCudaErrors(hipBindTextureToArray(tex, cuArray, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// Warmup
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dData, width, height, angle);
checkCudaErrors(hipDeviceSynchronize());
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// Execute the kernel
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dData, width, height, angle);
// Check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n",
(width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&timer);
// Allocate mem for the result on host side
float *hOutputData = (float *) malloc(size);
// copy result from device to host
checkCudaErrors(hipMemcpy(hOutputData,
dData,
size,
hipMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_out.pgm");
sdkSavePGM(outputFilename, hOutputData, width, height);
printf("Wrote '%s'\n", outputFilename);
// Write regression file if necessary
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// Write file for regression test
sdkWriteFile<float>("./data/regression.dat",
hOutputData,
width*height,
0.0f,
false);
}
else
{
// We need to reload the data from disk,
// because it is inverted upon output
sdkLoadPGM(outputFilename, &hOutputData, &width, &height);
printf("Comparing files\n");
printf("\toutput: <%s>\n", outputFilename);
printf("\treference: <%s>\n", refPath);
testResult = compareData(hOutputData,
hDataRef,
width*height,
MAX_EPSILON_ERROR,
0.15f);
}
checkCudaErrors(hipFree(dData));
checkCudaErrors(hipFreeArray(cuArray));
free(imagePath);
free(refPath);
}
| 18031f917ad2f65c0861b7ea2ece12d479349177.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
* an output PGM image (image_filename_out). This CUDA kernel performs
* a simple 2D transform (rotation) on the texture coordinates (u,v).
*/
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <cuda_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
// Define the files that are to be save and the reference images for validation
const char *imageFilename = "/home/simbarashe/Downloads/assignment3/assignement2Helper/data/lena_bw.pgm";
const char *refFilename = "/home/simbarashe/Downloads/assignment3/assignement2Helper/data/ref_rotated.pgm";
const char *sampleName = "/home/simbarashe/Downloads/assignment3/assignement2Helper/simpleTexture";
////////////////////////////////////////////////////////////////////////////////
// Constants
const float angle = 0.5f; // angle to rotate image by (in radians)
// Texture reference for 2D float texture
texture<float, 2, cudaReadModeElementType> tex;
// Auto-Verification Code
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void transformKernel(float *outputData,
int width,
int height,
float theta)
{
// calculate normalized texture coordinates
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = (float)x - (float)width/2;
float v = (float)y - (float)height/2;
float tu = u*cosf(theta) - v*sinf(theta);
float tv = v*cosf(theta) + u*sinf(theta);
tu /= (float)width;
tv /= (float)height;
// read from texture and write to global memory
outputData[y*width + x] = tex2D(tex, tu+0.5f, tv+0.5f);
}
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", sampleName);
// Process command-line arguments
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **) argv, "input"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"input",
(char **) &imageFilename);
if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"reference",
(char **) &refFilename);
}
else
{
printf("-input flag should be used with -reference flag");
exit(EXIT_FAILURE);
}
}
else if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
printf("-reference flag should be used with -input flag");
exit(EXIT_FAILURE);
}
}
runTest(argc, argv);
cudaDeviceReset();
printf("%s completed, returned %s\n",
sampleName,
testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
int devID = findCudaDevice(argc, (const char **) argv);
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
//Load reference image from image (output)
float *hDataRef = (float *) malloc(size);
char *refPath = sdkFindFilePath(refFilename, argv[0]);
if (refPath == NULL)
{
printf("Unable to find reference image file: %s\n", refFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(refPath, &hDataRef, &width, &height);
// Allocate device memory for result
float *dData = NULL;
checkCudaErrors(cudaMalloc((void **) &dData, size));
// Allocate array and copy image data
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *cuArray;
checkCudaErrors(cudaMallocArray(&cuArray,&channelDesc,width,height));
checkCudaErrors(cudaMemcpyToArray(cuArray,0, 0, hData,size,cudaMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
checkCudaErrors(cudaBindTextureToArray(tex, cuArray, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// Warmup
transformKernel<<<dimGrid, dimBlock, 0>>>(dData, width, height, angle);
checkCudaErrors(cudaDeviceSynchronize());
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// Execute the kernel
transformKernel<<<dimGrid, dimBlock, 0>>>(dData, width, height, angle);
// Check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n",
(width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&timer);
// Allocate mem for the result on host side
float *hOutputData = (float *) malloc(size);
// copy result from device to host
checkCudaErrors(cudaMemcpy(hOutputData,
dData,
size,
cudaMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_out.pgm");
sdkSavePGM(outputFilename, hOutputData, width, height);
printf("Wrote '%s'\n", outputFilename);
// Write regression file if necessary
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// Write file for regression test
sdkWriteFile<float>("./data/regression.dat",
hOutputData,
width*height,
0.0f,
false);
}
else
{
// We need to reload the data from disk,
// because it is inverted upon output
sdkLoadPGM(outputFilename, &hOutputData, &width, &height);
printf("Comparing files\n");
printf("\toutput: <%s>\n", outputFilename);
printf("\treference: <%s>\n", refPath);
testResult = compareData(hOutputData,
hDataRef,
width*height,
MAX_EPSILON_ERROR,
0.15f);
}
checkCudaErrors(cudaFree(dData));
checkCudaErrors(cudaFreeArray(cuArray));
free(imagePath);
free(refPath);
}
|
0fe0e8c21ed39430ab3d17e89cd70fa2dad2984b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_
#include <stdio.h>
#include "2Dconvolution.h"
// 2D convolution kernel thread specification
__global__ void ConvolutionKernel(Matrix N, Matrix P)
{
__shared__ float tileNs[BLOCK_SIZE][BLOCK_SIZE];
// get thread indices
int tx = threadIdx.x;
int ty = threadIdx.y;
// get the output indices
int row_o = ty + blockIdx.y * TILE_SIZE;
int col_o = tx + blockIdx.x * TILE_SIZE;
// shift to obtain input indices
int row_i = row_o - KS_DIV_2;
int col_i = col_o - KS_DIV_2;
// Load tile elements
if(row_i >= 0 && row_i < N.height && col_i >= 0 && col_i < N.width)
tileNs[ty][tx] = N.elements[row_i*N.width + col_i];
else
tileNs[ty][tx] = 0.0f;
// Wait until all tile elements are loaded
__syncthreads();
// only compute if you're an output tile element
if(tx < TILE_SIZE && ty < TILE_SIZE){
float pValue = 0.0f;
for(int y=0; y<KERNEL_SIZE; y++)
for(int x=0; x<KERNEL_SIZE; x++)
pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx];
// only write values if you are inside matrix bounds
if(row_o < P.height && col_o < P.width)
P.elements[row_o*P.width + col_o] = pValue;
}
}
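// Illustrative note (relies on definitions in 2Dconvolution.h, not shown
// here): each BLOCK_SIZE x BLOCK_SIZE thread block loads an input tile that
// includes a halo of KS_DIV_2 pixels on every side, padding with zeros at the
// image border, and only the inner TILE_SIZE x TILE_SIZE threads compute an
// output pixel; this presumes BLOCK_SIZE == TILE_SIZE + KERNEL_SIZE - 1 and
// that the kernel coefficients live in the constant array Mc.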
#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
| 0fe0e8c21ed39430ab3d17e89cd70fa2dad2984b.cu | #ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_
#include <stdio.h>
#include "2Dconvolution.h"
// 2D convolution kernel thread specification
__global__ void ConvolutionKernel(Matrix N, Matrix P)
{
__shared__ float tileNs[BLOCK_SIZE][BLOCK_SIZE];
// get thread indices
int tx = threadIdx.x;
int ty = threadIdx.y;
// get the output indices
int row_o = ty + blockIdx.y * TILE_SIZE;
int col_o = tx + blockIdx.x * TILE_SIZE;
// shift to obtain input indices
int row_i = row_o - KS_DIV_2;
int col_i = col_o - KS_DIV_2;
// Load tile elements
if(row_i >= 0 && row_i < N.height && col_i >= 0 && col_i < N.width)
tileNs[ty][tx] = N.elements[row_i*N.width + col_i];
else
tileNs[ty][tx] = 0.0f;
// Wait until all tile elements are loaded
__syncthreads();
// only compute if you're an output tile element
if(tx < TILE_SIZE && ty < TILE_SIZE){
float pValue = 0.0f;
for(int y=0; y<KERNEL_SIZE; y++)
for(int x=0; x<KERNEL_SIZE; x++)
pValue += Mc[y*KERNEL_SIZE + x] * tileNs[y+ty][x+tx];
// only write values if you are inside matrix bounds
if(row_o < P.height && col_o < P.width)
P.elements[row_o*P.width + col_o] = pValue;
}
}
#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
|
964e7d7dae30ef6567d3045cb95fcbb7e8c9e263.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <inttypes.h>
uint8_t ctx_key[32];
uint8_t ctx_enckey[32];
uint8_t ctx_deckey[32];
#define AES_BLOCK_SIZE 16
#define THREADS_PER_BLOCK 512
#define F(x) (((x)<<1) ^ ((((x)>>7) & 1) * 0x1b))
#define FD(x) (((x) >> 1) ^ (((x) & 1) ? 0x8d : 0))
// S table
__constant__ static const uint8_t sbox[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
// inv S table
__constant__ static const uint8_t sboxinv[256] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
};
// x-time operation
__device__ uint8_t rj_xtime(uint8_t x){
return (x & 0x80) ? ((x << 1) ^ 0x1b) : (x << 1);
}
// subbyte operation
__device__ void aes_subBytes(uint8_t *buf){
register uint8_t i, b;
for (i = 0; i < 16; ++i){
b = buf[i];
buf[i] = sbox[b];
}
}
// inv subbyte operation
__device__ void aes_subBytes_inv(uint8_t *buf){
register uint8_t i, b;
for (i = 0; i < 16; ++i){
b = buf[i];
buf[i] = sboxinv[b];
}
}
// add round key operation
__device__ void aes_addRoundKey(uint8_t *buf, uint8_t *key){
register uint8_t i = 16;
while (i--){
buf[i] ^= key[i];
}
}
// add round key at beginning
__device__ void aes_addRoundKey_cpy(uint8_t *buf, uint8_t *key, uint8_t *cpk){
register uint8_t i = 16;
while (i--){
buf[i] ^= (cpk[i] = key[i]);
cpk[16+i] = key[16 + i];
}
}
// shift row operation
__device__ void aes_shiftRows(uint8_t *buf){
register uint8_t i, j;
i = buf[1];
buf[1] = buf[5];
buf[5] = buf[9];
buf[9] = buf[13];
buf[13] = i;
i = buf[10];
buf[10] = buf[2];
buf[2] = i;
j = buf[3];
buf[3] = buf[15];
buf[15] = buf[11];
buf[11] = buf[7];
buf[7] = j;
j = buf[14];
buf[14] = buf[6];
buf[6] = j;
}
// inv shift row operation
__device__ void aes_shiftRows_inv(uint8_t *buf){
register uint8_t i, j;
i = buf[1];
buf[1] = buf[13];
buf[13] = buf[9];
buf[9] = buf[5];
buf[5] = i;
i = buf[2];
buf[2] = buf[10];
buf[10] = i;
j = buf[3];
buf[3] = buf[7];
buf[7] = buf[11];
buf[11] = buf[15];
buf[15] = j;
j = buf[6];
buf[6] = buf[14];
buf[14] = j;
}
// mix column operation
__device__ void aes_mixColumns(uint8_t *buf){
register uint8_t i, a, b, c, d, e;
for (i = 0; i < 16; i += 4){
a = buf[i];
b = buf[i + 1];
c = buf[i + 2];
d = buf[i + 3];
e = a ^ b ^ c ^ d;
buf[i] ^= e ^ rj_xtime(a^b);
buf[i+1] ^= e ^ rj_xtime(b^c);
buf[i+2] ^= e ^ rj_xtime(c^d);
buf[i+3] ^= e ^ rj_xtime(d^a);
}
}
// inv mix column operation
__device__ void aes_mixColumns_inv(uint8_t *buf){
register uint8_t i, a, b, c, d, e, x, y, z;
for (i = 0; i < 16; i += 4){
a = buf[i];
b = buf[i + 1];
c = buf[i + 2];
d = buf[i + 3];
e = a ^ b ^ c ^ d;
z = rj_xtime(e);
x = e ^ rj_xtime(rj_xtime(z^a^c));
y = e ^ rj_xtime(rj_xtime(z^b^d));
buf[i] ^= x ^ rj_xtime(a^b);
buf[i+1] ^= y ^ rj_xtime(b^c);
buf[i+2] ^= x ^ rj_xtime(c^d);
buf[i+3] ^= y ^ rj_xtime(d^a);
}
}
// add expand key operation
__device__ __host__ void aes_expandEncKey(uint8_t *k, uint8_t *rc, const uint8_t *sb){
register uint8_t i;
k[0] ^= sb[k[29]] ^ (*rc);
k[1] ^= sb[k[30]];
k[2] ^= sb[k[31]];
k[3] ^= sb[k[28]];
*rc = F( *rc);
for(i = 4; i < 16; i += 4){
k[i] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
k[16] ^= sb[k[12]];
k[17] ^= sb[k[13]];
k[18] ^= sb[k[14]];
k[19] ^= sb[k[15]];
for(i = 20; i < 32; i += 4){
k[i] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
}
// inv add expand key operation
__device__ void aes_expandDecKey(uint8_t *k, uint8_t *rc){
uint8_t i;
for(i = 28; i > 16; i -= 4){
k[i+0] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
k[16] ^= sbox[k[12]];
k[17] ^= sbox[k[13]];
k[18] ^= sbox[k[14]];
k[19] ^= sbox[k[15]];
for(i = 12; i > 0; i -= 4){
k[i+0] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
*rc = FD(*rc);
k[0] ^= sbox[k[29]] ^ (*rc);
k[1] ^= sbox[k[30]];
k[2] ^= sbox[k[31]];
k[3] ^= sbox[k[28]];
}
// key initialization
void aes256_init(uint8_t *k){
uint8_t rcon = 1;
register uint8_t i;
for (i = 0; i < sizeof(ctx_key); i++){
ctx_enckey[i] = ctx_deckey[i] = k[i];
}
for (i = 8;--i;){
aes_expandEncKey(ctx_deckey, &rcon, sbox);
}
}
// aes encrypt algorithm: each thread encrypts one independent AES_BLOCK_SIZE-byte block (ECB mode)
__global__ void aes256_encrypt_ecb(uint8_t *buf_d, unsigned long numbytes, uint8_t *ctx_enckey_d, uint8_t *ctx_key_d){
uint8_t i, rcon;
uint8_t buf_t[AES_BLOCK_SIZE]; // thread buffer
//printf("Thread %d\n", threadIdx.x);
unsigned long offset = (blockIdx.x * THREADS_PER_BLOCK * AES_BLOCK_SIZE) + (threadIdx.x * AES_BLOCK_SIZE);
if (offset >= numbytes) { return; }
memcpy(buf_t, &buf_d[offset], AES_BLOCK_SIZE);
aes_addRoundKey_cpy(buf_t, ctx_enckey_d, ctx_key_d);
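// ctx_key_d holds 32 bytes = two 16-byte round keys; odd rounds use its upper half, even rounds expand the next 32 bytes of the AES-256 key schedule in place and use the lower half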
for(i = 1, rcon = 1; i < 14; ++i){
aes_subBytes(buf_t);
aes_shiftRows(buf_t);
aes_mixColumns(buf_t);
if( i & 1 ){
aes_addRoundKey( buf_t, &ctx_key_d[16]);
}
else{
aes_expandEncKey(ctx_key_d, &rcon, sbox), aes_addRoundKey(buf_t, ctx_key_d);
}
}
aes_subBytes(buf_t);
aes_shiftRows(buf_t);
aes_expandEncKey(ctx_key_d, &rcon, sbox);
aes_addRoundKey(buf_t, ctx_key_d);
/* copy thread buffer back into global memory */
memcpy(&buf_d[offset], buf_t, AES_BLOCK_SIZE);
__syncthreads();
}
// aes decrypt algorithm
__global__ void aes256_decrypt_ecb(uint8_t *buf_d, unsigned long numbytes, uint8_t *ctx_deckey_d, uint8_t *ctx_key_d){
uint8_t i, rcon;
uint8_t buf_t[AES_BLOCK_SIZE];
unsigned long offset = (blockIdx.x * THREADS_PER_BLOCK * AES_BLOCK_SIZE) + (threadIdx.x * AES_BLOCK_SIZE);
if (offset >= numbytes) { return; }
memcpy(buf_t, &buf_d[offset], AES_BLOCK_SIZE);
aes_addRoundKey_cpy(buf_t, ctx_deckey_d, ctx_key_d);
aes_shiftRows_inv(buf_t);
aes_subBytes_inv(buf_t);
for (i = 14, rcon = 0x80; --i;){
if( ( i & 1 ) ){
aes_expandDecKey(ctx_key_d, &rcon);
aes_addRoundKey(buf_t, &ctx_key_d[16]);
}
else{
aes_addRoundKey(buf_t, ctx_key_d);
}
aes_mixColumns_inv(buf_t);
aes_shiftRows_inv(buf_t);
aes_subBytes_inv(buf_t);
}
aes_addRoundKey( buf_t, ctx_key_d);
/* copy thread buffer back into global memory */
memcpy(&buf_d[offset], buf_t, AES_BLOCK_SIZE);
__syncthreads();
}
// aes encrypt demo
void encryptdemo(uint8_t key[32], uint8_t *buf, unsigned long numbytes){
uint8_t *buf_d;
uint8_t *ctx_key_d, *ctx_enckey_d;
hipMemcpyToSymbol(sbox, sbox, sizeof(uint8_t)*256);
printf("\nBeginning encryption\n");
aes256_init(key);
hipMalloc((void**)&buf_d, numbytes);
hipMalloc((void**)&ctx_enckey_d, sizeof(ctx_enckey));
hipMalloc((void**)&ctx_key_d, sizeof(ctx_key));
hipMemcpy(buf_d, buf, numbytes, hipMemcpyHostToDevice);
hipMemcpy(ctx_enckey_d, ctx_enckey, sizeof(ctx_enckey), hipMemcpyHostToDevice);
hipMemcpy(ctx_key_d, ctx_key, sizeof(ctx_key), hipMemcpyHostToDevice);
dim3 dimBlock(ceil((double)numbytes / (double)(THREADS_PER_BLOCK * AES_BLOCK_SIZE)));
dim3 dimGrid(THREADS_PER_BLOCK);
// printf("Creating %d threads over %d blocks\n", dimBlock.x*dimGrid.x, dimBlock.x);
hipLaunchKernelGGL(( aes256_encrypt_ecb), dim3(dimBlock), dim3(dimGrid), 0, 0, buf_d, numbytes, ctx_enckey_d, ctx_key_d);
hipMemcpy(buf, buf_d, numbytes, hipMemcpyDeviceToHost);
// print(buf);
hipMemcpy(ctx_enckey, ctx_enckey_d, sizeof(ctx_enckey), hipMemcpyDeviceToHost);
hipMemcpy(ctx_key, ctx_key_d, sizeof(ctx_key), hipMemcpyDeviceToHost);
hipFree(buf_d);
hipFree(ctx_key_d);
hipFree(ctx_enckey_d);
}
// aes decrypt demo
void decryptdemo(uint8_t key[32], uint8_t *buf, unsigned long numbytes){
uint8_t *buf_d;
uint8_t *ctx_key_d, *ctx_deckey_d;
hipMemcpyToSymbol(sboxinv, sboxinv, sizeof(uint8_t)*256);
printf("\nBeginning decryption\n");
hipMalloc((void**)&buf_d, numbytes);
hipMalloc((void**)&ctx_deckey_d, sizeof(ctx_deckey));
hipMalloc((void**)&ctx_key_d, sizeof(ctx_key));
hipMemcpy(buf_d, buf, numbytes, hipMemcpyHostToDevice);
hipMemcpy(ctx_deckey_d, ctx_deckey, sizeof(ctx_deckey), hipMemcpyHostToDevice);
hipMemcpy(ctx_key_d, ctx_key, sizeof(ctx_key), hipMemcpyHostToDevice);
dim3 dimBlock(ceil((double)numbytes / (double)(THREADS_PER_BLOCK * AES_BLOCK_SIZE)));
dim3 dimGrid(THREADS_PER_BLOCK);
printf("Creating %d threads over %d blocks\n", dimBlock.x*dimGrid.x, dimBlock.x);
hipLaunchKernelGGL(( aes256_decrypt_ecb), dim3(dimBlock), dim3(dimGrid), 0, 0, buf_d, numbytes, ctx_deckey_d, ctx_key_d);
hipMemcpy(buf, buf_d, numbytes, hipMemcpyDeviceToHost);
hipMemcpy(ctx_deckey, ctx_deckey_d, sizeof(ctx_deckey), hipMemcpyDeviceToHost);
hipMemcpy(ctx_key, ctx_key_d, sizeof(ctx_key), hipMemcpyDeviceToHost);
hipFree(buf_d);
hipFree(ctx_key_d);
hipFree(ctx_deckey_d);
}
__global__ void GPU_init() { }
int main(){
// open file
FILE *file;
uint8_t *buf;
unsigned long numbytes;
char *fname;
clock_t start, enc_time, dec_time, end;
int mili_sec, i;
int padding;
uint8_t key[32];
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess){
printf("Error: %s\n", hipGetErrorString(error_id));
printf("Exiting...\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0){
printf("There are no available device(s) that support CUDA\n");
exit(EXIT_FAILURE);
}
// handle txt file
fname = "input.txt";
file = fopen(fname, "r");
if (file == NULL) {printf("File %s doesn't exist\n", fname); exit(1); }
printf("Opened file %s\n", fname);
fseek(file, 0L, SEEK_END);
numbytes = ftell(file);
printf("Size is %lu\n", numbytes);
// calculate how many padding bytes are needed to reach a multiple of AES_BLOCK_SIZE
padding = (AES_BLOCK_SIZE - numbytes % AES_BLOCK_SIZE) % AES_BLOCK_SIZE;
// copy file into memory, allocating room for the zero padding as well
fseek(file, 0L, SEEK_SET);
buf = (uint8_t*)calloc(numbytes + padding, sizeof(uint8_t));
if(buf == NULL) exit(1);
if (fread(buf, 1, numbytes, file) != numbytes)
{
printf("Unable to read all bytes from file %s\n", fname);
exit(EXIT_FAILURE);
}
fclose(file);
numbytes += padding;
printf("Padding file with %d bytes for a new size of %lu\n", padding, numbytes);
// generate key
for (i = 0; i < sizeof(key);i++) key[i] = i;
// this is to force nvcc to put the gpu initialization here
hipLaunchKernelGGL(( GPU_init), dim3(1), dim3(1), 0, 0, );
// encryption
start = clock();
encryptdemo(key, buf, numbytes);
end = clock();
printf("time used:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
printf("GPU encryption throughput: %f bytes/second\n", (double)(numbytes) / ((double)(end - start) / CLOCKS_PER_SEC));
// write into file
file = fopen("cipher.txt", "w");
fwrite(buf, 1, numbytes, file);
fclose(file);
// decryption
start = clock();
decryptdemo(key, buf, numbytes);
end = clock();
printf("time used:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
printf("GPU decryption throughput: %f bytes/second\n", (double)(numbytes) / ((double)(end - start) / CLOCKS_PER_SEC));
// write into file
file = fopen("output.txt", "w");
fwrite(buf, 1, numbytes - padding, file);
fclose(file);
free(buf);
return EXIT_SUCCESS;
} | 964e7d7dae30ef6567d3045cb95fcbb7e8c9e263.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <inttypes.h>
uint8_t ctx_key[32];
uint8_t ctx_enckey[32];
uint8_t ctx_deckey[32];
#define AES_BLOCK_SIZE 16
#define THREADS_PER_BLOCK 512
#define F(x) (((x)<<1) ^ ((((x)>>7) & 1) * 0x1b))
#define FD(x) (((x) >> 1) ^ (((x) & 1) ? 0x8d : 0))
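// F applies xtime (multiplication by 2 in GF(2^8)) and FD is its inverse; they advance/rewind the key-schedule round constant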
// S table
__constant__ static const uint8_t sbox[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
// inv S table
__constant__ static const uint8_t sboxinv[256] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
};
// x-time operation
__device__ uint8_t rj_xtime(uint8_t x){
return (x & 0x80) ? ((x << 1) ^ 0x1b) : (x << 1);
}
// subbyte operation
__device__ void aes_subBytes(uint8_t *buf){
register uint8_t i, b;
for (i = 0; i < 16; ++i){
b = buf[i];
buf[i] = sbox[b];
}
}
// inv subbyte operation
__device__ void aes_subBytes_inv(uint8_t *buf){
register uint8_t i, b;
for (i = 0; i < 16; ++i){
b = buf[i];
buf[i] = sboxinv[b];
}
}
// add round key operation
__device__ void aes_addRoundKey(uint8_t *buf, uint8_t *key){
register uint8_t i = 16;
while (i--){
buf[i] ^= key[i];
}
}
// add round key at beginning
__device__ void aes_addRoundKey_cpy(uint8_t *buf, uint8_t *key, uint8_t *cpk){
register uint8_t i = 16;
while (i--){
buf[i] ^= (cpk[i] = key[i]);
cpk[16+i] = key[16 + i];
}
}
// shift row operation
__device__ void aes_shiftRows(uint8_t *buf){
register uint8_t i, j;
i = buf[1];
buf[1] = buf[5];
buf[5] = buf[9];
buf[9] = buf[13];
buf[13] = i;
i = buf[10];
buf[10] = buf[2];
buf[2] = i;
j = buf[3];
buf[3] = buf[15];
buf[15] = buf[11];
buf[11] = buf[7];
buf[7] = j;
j = buf[14];
buf[14] = buf[6];
buf[6] = j;
}
// inv shift row operation
__device__ void aes_shiftRows_inv(uint8_t *buf){
register uint8_t i, j;
i = buf[1];
buf[1] = buf[13];
buf[13] = buf[9];
buf[9] = buf[5];
buf[5] = i;
i = buf[2];
buf[2] = buf[10];
buf[10] = i;
j = buf[3];
buf[3] = buf[7];
buf[7] = buf[11];
buf[11] = buf[15];
buf[15] = j;
j = buf[6];
buf[6] = buf[14];
buf[14] = j;
}
// mix column operation
__device__ void aes_mixColumns(uint8_t *buf){
register uint8_t i, a, b, c, d, e;
for (i = 0; i < 16; i += 4){
a = buf[i];
b = buf[i + 1];
c = buf[i + 2];
d = buf[i + 3];
e = a ^ b ^ c ^ d;
buf[i] ^= e ^ rj_xtime(a^b);
buf[i+1] ^= e ^ rj_xtime(b^c);
buf[i+2] ^= e ^ rj_xtime(c^d);
buf[i+3] ^= e ^ rj_xtime(d^a);
}
}
// inv mix column operation
__device__ void aes_mixColumns_inv(uint8_t *buf){
register uint8_t i, a, b, c, d, e, x, y, z;
for (i = 0; i < 16; i += 4){
a = buf[i];
b = buf[i + 1];
c = buf[i + 2];
d = buf[i + 3];
e = a ^ b ^ c ^ d;
z = rj_xtime(e);
x = e ^ rj_xtime(rj_xtime(z^a^c));
y = e ^ rj_xtime(rj_xtime(z^b^d));
buf[i] ^= x ^ rj_xtime(a^b);
buf[i+1] ^= y ^ rj_xtime(b^c);
buf[i+2] ^= x ^ rj_xtime(c^d);
buf[i+3] ^= y ^ rj_xtime(d^a);
}
}
// add expand key operation
__device__ __host__ void aes_expandEncKey(uint8_t *k, uint8_t *rc, const uint8_t *sb){
register uint8_t i;
k[0] ^= sb[k[29]] ^ (*rc);
k[1] ^= sb[k[30]];
k[2] ^= sb[k[31]];
k[3] ^= sb[k[28]];
*rc = F( *rc);
for(i = 4; i < 16; i += 4){
k[i] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
k[16] ^= sb[k[12]];
k[17] ^= sb[k[13]];
k[18] ^= sb[k[14]];
k[19] ^= sb[k[15]];
for(i = 20; i < 32; i += 4){
k[i] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
}
// inv add expand key operation
__device__ void aes_expandDecKey(uint8_t *k, uint8_t *rc){
uint8_t i;
for(i = 28; i > 16; i -= 4){
k[i+0] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
k[16] ^= sbox[k[12]];
k[17] ^= sbox[k[13]];
k[18] ^= sbox[k[14]];
k[19] ^= sbox[k[15]];
for(i = 12; i > 0; i -= 4){
k[i+0] ^= k[i-4];
k[i+1] ^= k[i-3];
k[i+2] ^= k[i-2];
k[i+3] ^= k[i-1];
}
*rc = FD(*rc);
k[0] ^= sbox[k[29]] ^ (*rc);
k[1] ^= sbox[k[30]];
k[2] ^= sbox[k[31]];
k[3] ^= sbox[k[28]];
}
// key initialization
void aes256_init(uint8_t *k){
uint8_t rcon = 1;
register uint8_t i;
for (i = 0; i < sizeof(ctx_key); i++){
ctx_enckey[i] = ctx_deckey[i] = k[i];
}
for (i = 8;--i;){
aes_expandEncKey(ctx_deckey, &rcon, sbox);
}
}
// aes encrypt algorithm: each thread encrypts one independent AES_BLOCK_SIZE-byte block (ECB mode)
__global__ void aes256_encrypt_ecb(uint8_t *buf_d, unsigned long numbytes, uint8_t *ctx_enckey_d, uint8_t *ctx_key_d){
uint8_t i, rcon;
uint8_t buf_t[AES_BLOCK_SIZE]; // thread buffer
//printf("Thread %d\n", threadIdx.x);
unsigned long offset = (blockIdx.x * THREADS_PER_BLOCK * AES_BLOCK_SIZE) + (threadIdx.x * AES_BLOCK_SIZE);
if (offset >= numbytes) { return; }
memcpy(buf_t, &buf_d[offset], AES_BLOCK_SIZE);
aes_addRoundKey_cpy(buf_t, ctx_enckey_d, ctx_key_d);
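// ctx_key_d holds 32 bytes = two 16-byte round keys; odd rounds use its upper half, even rounds expand the next 32 bytes of the AES-256 key schedule in place and use the lower half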
for(i = 1, rcon = 1; i < 14; ++i){
aes_subBytes(buf_t);
aes_shiftRows(buf_t);
aes_mixColumns(buf_t);
if( i & 1 ){
aes_addRoundKey( buf_t, &ctx_key_d[16]);
}
else{
aes_expandEncKey(ctx_key_d, &rcon, sbox), aes_addRoundKey(buf_t, ctx_key_d);
}
}
aes_subBytes(buf_t);
aes_shiftRows(buf_t);
aes_expandEncKey(ctx_key_d, &rcon, sbox);
aes_addRoundKey(buf_t, ctx_key_d);
/* copy thread buffer back into global memory */
memcpy(&buf_d[offset], buf_t, AES_BLOCK_SIZE);
__syncthreads();
}
// aes decrypt algorithm
__global__ void aes256_decrypt_ecb(uint8_t *buf_d, unsigned long numbytes, uint8_t *ctx_deckey_d, uint8_t *ctx_key_d){
uint8_t i, rcon;
uint8_t buf_t[AES_BLOCK_SIZE];
unsigned long offset = (blockIdx.x * THREADS_PER_BLOCK * AES_BLOCK_SIZE) + (threadIdx.x * AES_BLOCK_SIZE);
if (offset >= numbytes) { return; }
memcpy(buf_t, &buf_d[offset], AES_BLOCK_SIZE);
aes_addRoundKey_cpy(buf_t, ctx_deckey_d, ctx_key_d);
aes_shiftRows_inv(buf_t);
aes_subBytes_inv(buf_t);
for (i = 14, rcon = 0x80; --i;){
if( ( i & 1 ) ){
aes_expandDecKey(ctx_key_d, &rcon);
aes_addRoundKey(buf_t, &ctx_key_d[16]);
}
else{
aes_addRoundKey(buf_t, ctx_key_d);
}
aes_mixColumns_inv(buf_t);
aes_shiftRows_inv(buf_t);
aes_subBytes_inv(buf_t);
}
aes_addRoundKey( buf_t, ctx_key_d);
/* copy thread buffer back into global memory */
memcpy(&buf_d[offset], buf_t, AES_BLOCK_SIZE);
__syncthreads();
}
// aes encrypt demo
void encryptdemo(uint8_t key[32], uint8_t *buf, unsigned long numbytes){
uint8_t *buf_d;
uint8_t *ctx_key_d, *ctx_enckey_d;
cudaMemcpyToSymbol(sbox, sbox, sizeof(uint8_t)*256);
printf("\nBeginning encryption\n");
aes256_init(key);
cudaMalloc((void**)&buf_d, numbytes);
cudaMalloc((void**)&ctx_enckey_d, sizeof(ctx_enckey));
cudaMalloc((void**)&ctx_key_d, sizeof(ctx_key));
cudaMemcpy(buf_d, buf, numbytes, cudaMemcpyHostToDevice);
cudaMemcpy(ctx_enckey_d, ctx_enckey, sizeof(ctx_enckey), cudaMemcpyHostToDevice);
cudaMemcpy(ctx_key_d, ctx_key, sizeof(ctx_key), cudaMemcpyHostToDevice);
dim3 dimBlock(ceil((double)numbytes / (double)(THREADS_PER_BLOCK * AES_BLOCK_SIZE)));
dim3 dimGrid(THREADS_PER_BLOCK);
// printf("Creating %d threads over %d blocks\n", dimBlock.x*dimGrid.x, dimBlock.x);
aes256_encrypt_ecb<<<dimBlock, dimGrid>>>(buf_d, numbytes, ctx_enckey_d, ctx_key_d);
cudaMemcpy(buf, buf_d, numbytes, cudaMemcpyDeviceToHost);
// print(buf);
cudaMemcpy(ctx_enckey, ctx_enckey_d, sizeof(ctx_enckey), cudaMemcpyDeviceToHost);
cudaMemcpy(ctx_key, ctx_key_d, sizeof(ctx_key), cudaMemcpyDeviceToHost);
cudaFree(buf_d);
cudaFree(ctx_key_d);
cudaFree(ctx_enckey_d);
}
// aes decrypt demo
void decryptdemo(uint8_t key[32], uint8_t *buf, unsigned long numbytes){
uint8_t *buf_d;
uint8_t *ctx_key_d, *ctx_deckey_d;
cudaMemcpyToSymbol(sboxinv, sboxinv, sizeof(uint8_t)*256);
printf("\nBeginning decryption\n");
cudaMalloc((void**)&buf_d, numbytes);
cudaMalloc((void**)&ctx_deckey_d, sizeof(ctx_deckey));
cudaMalloc((void**)&ctx_key_d, sizeof(ctx_key));
cudaMemcpy(buf_d, buf, numbytes, cudaMemcpyHostToDevice);
cudaMemcpy(ctx_deckey_d, ctx_deckey, sizeof(ctx_deckey), cudaMemcpyHostToDevice);
cudaMemcpy(ctx_key_d, ctx_key, sizeof(ctx_key), cudaMemcpyHostToDevice);
dim3 dimBlock(ceil((double)numbytes / (double)(THREADS_PER_BLOCK * AES_BLOCK_SIZE)));
dim3 dimGrid(THREADS_PER_BLOCK);
printf("Creating %d threads over %d blocks\n", dimBlock.x*dimGrid.x, dimBlock.x);
aes256_decrypt_ecb<<<dimBlock, dimGrid>>>(buf_d, numbytes, ctx_deckey_d, ctx_key_d);
cudaMemcpy(buf, buf_d, numbytes, cudaMemcpyDeviceToHost);
cudaMemcpy(ctx_deckey, ctx_deckey_d, sizeof(ctx_deckey), cudaMemcpyDeviceToHost);
cudaMemcpy(ctx_key, ctx_key_d, sizeof(ctx_key), cudaMemcpyDeviceToHost);
cudaFree(buf_d);
cudaFree(ctx_key_d);
cudaFree(ctx_deckey_d);
}
__global__ void GPU_init() { }
int main(){
// open file
FILE *file;
uint8_t *buf;
unsigned long numbytes;
char *fname;
clock_t start, enc_time, dec_time, end;
int mili_sec, i;
int padding;
uint8_t key[32];
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess){
printf("Error: %s\n", cudaGetErrorString(error_id));
printf("Exiting...\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0){
printf("There are no available device(s) that support CUDA\n");
exit(EXIT_FAILURE);
}
// handle txt file
fname = "input.txt";
file = fopen(fname, "r");
if (file == NULL) {printf("File %s doesn't exist\n", fname); exit(1); }
printf("Opened file %s\n", fname);
fseek(file, 0L, SEEK_END);
numbytes = ftell(file);
printf("Size is %lu\n", numbytes);
// calculate how many padding bytes are needed to reach a multiple of AES_BLOCK_SIZE
padding = (AES_BLOCK_SIZE - numbytes % AES_BLOCK_SIZE) % AES_BLOCK_SIZE;
// copy file into memory, allocating room for the zero padding as well
fseek(file, 0L, SEEK_SET);
buf = (uint8_t*)calloc(numbytes + padding, sizeof(uint8_t));
if(buf == NULL) exit(1);
if (fread(buf, 1, numbytes, file) != numbytes)
{
printf("Unable to read all bytes from file %s\n", fname);
exit(EXIT_FAILURE);
}
fclose(file);
numbytes += padding;
printf("Padding file with %d bytes for a new size of %lu\n", padding, numbytes);
// generate key
for (i = 0; i < sizeof(key);i++) key[i] = i;
// this is to force nvcc to put the gpu initialization here
GPU_init<<<1, 1>>>();
// encryption
start = clock();
encryptdemo(key, buf, numbytes);
end = clock();
printf("time used:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
printf("GPU encryption throughput: %f bytes/second\n", (double)(numbytes) / ((double)(end - start) / CLOCKS_PER_SEC));
// write into file
file = fopen("cipher.txt", "w");
fwrite(buf, 1, numbytes, file);
fclose(file);
// decryption
start = clock();
decryptdemo(key, buf, numbytes);
end = clock();
printf("time used:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
printf("GPU decryption throughput: %f bytes/second\n", (double)(numbytes) / ((double)(end - start) / CLOCKS_PER_SEC));
// write into file
file = fopen("output.txt", "w");
fwrite(buf, 1, numbytes - padding, file);
fclose(file);
free(buf);
return EXIT_SUCCESS;
} |
36b765150ac27610892c4a988c4e0e5827606dbc.hip | // !!! This is a file automatically generated by hipify!!!
// generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_gemm_ncdiv4hw4_ldg_128.cuinl"
template void megdnn::cuda::batch_conv_bias::
do_batch_conv_bias_int8_gemm_ncdiv4hw4_ldg_128<
PerChannelBiasVisitor,
IConvEpilogue<Activation<
megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
IConvEpilogue<Activation<
megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>
epilogue,
const ConvParam& param, float alpha, float beta, hipStream_t stream);
| 36b765150ac27610892c4a988c4e0e5827606dbc.cu | // generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_gemm_ncdiv4hw4_ldg_128.cuinl"
template void megdnn::cuda::batch_conv_bias::
do_batch_conv_bias_int8_gemm_ncdiv4hw4_ldg_128<
PerChannelBiasVisitor,
IConvEpilogue<Activation<
megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
IConvEpilogue<Activation<
megdnn::param_enumv::BatchConvBias::NonlineMode::IDENTITY>>
epilogue,
const ConvParam& param, float alpha, float beta, cudaStream_t stream);
|
88e54de0a7ee8a4865ac59929d410639830629f9.hip | // !!! This is a file automatically generated by hipify!!!
// Release date: June 2015
// Author: Taewoo Lee, ([email protected])
//
// Copyright (C) 2015 Taewoo Lee
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// Out: getDeviceInfo -> (execute) -> deviceInfo.[bin,txt]
//
// Reference:
// [1] Taewoo Lee, Sukmoon Chang, and Dongsuk Yook, "Parallel SRP-PHAT for
// GPUs," Computer Speech and Language, vol. 35, pp. 1-13, Jan. 2016.
//
#include <helper_cuda.h>
#include <stdio.h>
#include <assert.h>
#include <string> // std::string is used when assembling the profile string below
int main(void) {
hipDeviceProp_t deviceProp;
int dev, deviceCount=0;
int driverVersion=0, runtimeVersion=0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf("hipGetDeviceCount returned %d\n-> %s\n",
(int)error_id, hipGetErrorString(error_id));
exit(EXIT_FAILURE);
}
if (deviceCount==0) {
printf("There are no available device(s) that support CUDA\n");
exit(EXIT_FAILURE);
}
else if (deviceCount>1) {
printf("There are too many available device(s)\n");
exit(EXIT_FAILURE);
}
else {
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
for (dev=0; dev<deviceCount; ++dev) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
FILE *fp= fopen("deviceInfo.bin","wb");
int n= fwrite(&deviceProp,sizeof(hipDeviceProp_t),1,fp);
assert(n!=0);
fclose(fp);
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",\
driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000,\
(runtimeVersion%100)/10);
printf(" CUDA Capability Major/Minor version number: %d.%d\n", \
deviceProp.major, deviceProp.minor);
char msg[256];
SPRINTF(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf("%s", msg);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n",
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No");
printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled");
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)");
#endif
printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No");
printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
const char *sComputeMode[] =
{
"Default (multiple host threads can use ::hipSetDevice() with device simultaneously)",
"Exclusive (only one host thread in one process is able to use ::hipSetDevice() with this device)",
"Prohibited (no host thread can use ::hipSetDevice() with this device)",
"Exclusive Process (many threads in one process is able to use ::hipSetDevice() with this device)",
"Unknown",
NULL
};
printf(" Compute Mode: ");
printf("< %s >\n", sComputeMode[deviceProp.computeMode]);
}
std::string sProfileString = " deviceQuery, CUDA Driver = CUDART";
char cTemp[16];
sProfileString += ", CUDA Driver Version = ";
sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
sProfileString += cTemp;
sProfileString += ", CUDA Runtime Version = ";
sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
sProfileString += cTemp;
sProfileString += ", NumDevs = ";
sprintf(cTemp, "%d", deviceCount);
sProfileString += cTemp;
for (dev=0; dev<deviceCount; ++dev) {
sprintf(cTemp, ", Device%d = ", dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
sProfileString += cTemp;
sProfileString += deviceProp.name;
}
sProfileString += "\n";
printf("%s", sProfileString.c_str());
hipDeviceReset();
exit(EXIT_SUCCESS);
return 1;
}
| 88e54de0a7ee8a4865ac59929d410639830629f9.cu | // Release date: June 2015
// Author: Taewoo Lee, ([email protected])
//
// Copyright (C) 2015 Taewoo Lee
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// Out: getDeviceInfo -> (execute) -> deviceInfo.[bin,txt]
//
// Reference:
// [1] Taewoo Lee, Sukmoon Chang, and Dongsuk Yook, "Parallel SRP-PHAT for
// GPUs," Computer Speech and Language, vol. 35, pp. 1-13, Jan. 2016.
//
#include <helper_cuda.h>
#include <stdio.h>
#include <assert.h>
#include <string> // std::string is used when assembling the profile string below
int main(void) {
cudaDeviceProp deviceProp;
int dev, deviceCount=0;
int driverVersion=0, runtimeVersion=0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n",
(int)error_id, cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
if (deviceCount==0) {
printf("There are no available device(s) that support CUDA\n");
exit(EXIT_FAILURE);
}
else if (deviceCount>1) {
printf("There are too many available device(s)\n");
exit(EXIT_FAILURE);
}
else {
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
for (dev=0; dev<deviceCount; ++dev) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
FILE *fp= fopen("deviceInfo.bin","wb");
int n= fwrite(&deviceProp,sizeof(cudaDeviceProp),1,fp);
assert(n!=0);
fclose(fp);
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",\
driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000,\
(runtimeVersion%100)/10);
printf(" CUDA Capability Major/Minor version number: %d.%d\n", \
deviceProp.major, deviceProp.minor);
char msg[256];
SPRINTF(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf("%s", msg);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n",
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No");
printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled");
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)");
#endif
printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No");
printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
const char *sComputeMode[] =
{
"Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
"Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
"Prohibited (no host thread can use ::cudaSetDevice() with this device)",
"Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
"Unknown",
NULL
};
printf(" Compute Mode: ");
printf("< %s >\n", sComputeMode[deviceProp.computeMode]);
}
std::string sProfileString = " deviceQuery, CUDA Driver = CUDART";
char cTemp[16];
sProfileString += ", CUDA Driver Version = ";
sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
sProfileString += cTemp;
sProfileString += ", CUDA Runtime Version = ";
sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
sProfileString += cTemp;
sProfileString += ", NumDevs = ";
sprintf(cTemp, "%d", deviceCount);
sProfileString += cTemp;
for (dev=0; dev<deviceCount; ++dev) {
sprintf(cTemp, ", Device%d = ", dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
sProfileString += cTemp;
sProfileString += deviceProp.name;
}
sProfileString += "\n";
printf("%s", sProfileString.c_str());
cudaDeviceReset();
exit(EXIT_SUCCESS);
return 1;
}
|
e45d700c6724a552d63689f44ae52c802be522b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Program to solve Laplace equation on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 16
#define BLOCK_Y 8
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
__global__ void reduction(float *d_sum, float* d_u1, float* d_u2)
{
// dynamically allocated shared memory
extern __shared__ float temp[];
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// first, each thread loads data into shared memory
temp[threadIdx.x] = d_u1[tid] - d_u2[tid];
// next, we perform binary tree reduction
for (int d = blockDim.x>>1; d > 0; d >>= 1) {
__syncthreads(); // ensure previous step completed
if (threadIdx.x<d) temp[threadIdx.x] += temp[threadIdx.x+d]; // shared memory is indexed by the local thread id, not the global tid
}
// finally, first thread puts result into global memory
if (threadIdx.x==0) d_sum[blockIdx.x] = temp[0];
}
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=255, NY=255, NZ=255, REPEAT=10,
bx, by, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( hipMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
hipEventRecord(start);
checkCudaErrors( hipMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyHostToDevice) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
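// round up so the BLOCK_X x BLOCK_Y thread blocks tile the whole NX x NY plane; the z direction is handled inside the kernel (see laplace3d_kernel.h)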
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
dim3 dimGrid(bx,by);
dim3 dimBlock(BLOCK_X,BLOCK_Y);
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
hipEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
hipLaunchKernelGGL(( GPU_laplace3d), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
hipEventRecord(start);
checkCudaErrors( hipMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyDeviceToHost) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
hipEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( hipFree(d_u1) );
checkCudaErrors( hipFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
hipDeviceReset();
}
| e45d700c6724a552d63689f44ae52c802be522b5.cu | //
// Program to solve Laplace equation on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 16
#define BLOCK_Y 8
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
__global__ void reduction(float *d_sum, float* d_u1, float* d_u2)
{
// dynamically allocated shared memory
extern __shared__ float temp[];
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// first, each thread loads data into shared memory
temp[threadIdx.x] = d_u1[tid] - d_u2[tid];
// next, we perform binary tree reduction
for (int d = blockDim.x>>1; d > 0; d >>= 1) {
__syncthreads(); // ensure previous step completed
if (threadIdx.x<d) temp[threadIdx.x] += temp[threadIdx.x+d]; // shared memory is indexed by the local thread id, not the global tid
}
// finally, first thread puts result into global memory
if (threadIdx.x==0) d_sum[blockIdx.x] = temp[0];
}
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=255, NY=255, NZ=255, REPEAT=10,
bx, by, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( cudaMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyHostToDevice) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
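// round up so the BLOCK_X x BLOCK_Y thread blocks tile the whole NX x NY plane; the z direction is handled inside the kernel (see laplace3d_kernel.h)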
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
dim3 dimGrid(bx,by);
dim3 dimBlock(BLOCK_X,BLOCK_Y);
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
cudaEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
GPU_laplace3d<<<dimGrid, dimBlock>>>(NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
cudaEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( cudaFree(d_u1) );
checkCudaErrors( cudaFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
cudaDeviceReset();
}
|
b40c4fd7b4b07c36a2be82f8aba67412c30c89c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_stencil37_hack2_cp_slices(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch,int d_ypitch,int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices, int tile_x,int tile_y, int tile_z){
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices: begin!\n");
printf("copy slices: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy slices: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy slices: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy slices: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x * blockIdx.x;
//int area = n_rows*n_cols;
//int base_global_idx = base_global_slice*area + base_global_row * n_cols + base_global_col;
//int d_area = n_rows*d_xpitch;
//int s_area = n_rows*n_cols;
int d_area = d_ypitch*d_xpitch;
int s_area = s_ypitch*s_xpitch;
int base_global_idx = base_global_slice*d_area + base_global_row * d_xpitch + base_global_col;
int nextSlice = base_global_slice+1;
bool legalNextSlice = (nextSlice<n_slices);
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
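// each z-block copies the first slice of its tile plus the following slice into its own 2-slice region of shared_slices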
for(int ty=0;ty<tile_y;++ty){
bool legalCurRow = (base_global_row + ty)<n_rows;
//int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*n_cols + base_global_col+tx ;
//int dst_idx = base_global_idx + ty*n_cols+tx;
int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*s_xpitch + base_global_col+tx ;
int d_idx = base_global_idx + ty*d_xpitch+tx;
if(legalCurCol&&legalCurRow){
shared_slices[s_idx] = dst[d_idx];
}
if(legalNextSlice&&legalCurCol&&legalCurRow){
shared_slices[s_idx+s_area] = dst[d_idx+d_area];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 ){
// printf("shared_slices: addr:%d, val = %f\n",n_cols*n_rows + threadIdx.x,shared_slices[n_cols*n_rows+threadIdx.x]);
if(threadIdx.x==0||threadIdx.x==1||threadIdx.x==2){
int addr = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int addr1 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch;
int addr2 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch*2;
int daddr = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int daddr1 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch;
int daddr2 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch*2;
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr,dst[daddr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr1,dst[daddr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr2,dst[daddr2]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr,shared_slices[addr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr1,shared_slices[addr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr2,shared_slices[addr2]);
}
}
#endif
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices end!\n");
}
#endif
} | b40c4fd7b4b07c36a2be82f8aba67412c30c89c8.cu | #include "includes.h"
__global__ void gpu_stencil37_hack2_cp_slices(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch,int d_ypitch,int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices, int tile_x,int tile_y, int tile_z){
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices: begin!\n");
printf("copy slices: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy slices: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy slices: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy slices: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x * blockIdx.x;
//int area = n_rows*n_cols;
//int base_global_idx = base_global_slice*area + base_global_row * n_cols + base_global_col;
//int d_area = n_rows*d_xpitch;
//int s_area = n_rows*n_cols;
int d_area = d_ypitch*d_xpitch;
int s_area = s_ypitch*s_xpitch;
int base_global_idx = base_global_slice*d_area + base_global_row * d_xpitch + base_global_col;
int nextSlice = base_global_slice+1;
bool legalNextSlice = (nextSlice<n_slices);
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
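// each z-block copies the first slice of its tile plus the following slice into its own 2-slice region of shared_slices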
for(int ty=0;ty<tile_y;++ty){
bool legalCurRow = (base_global_row + ty)<n_rows;
//int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*n_cols + base_global_col+tx ;
//int dst_idx = base_global_idx + ty*n_cols+tx;
int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*s_xpitch + base_global_col+tx ;
int d_idx = base_global_idx + ty*d_xpitch+tx;
if(legalCurCol&&legalCurRow){
shared_slices[s_idx] = dst[d_idx];
}
if(legalNextSlice&&legalCurCol&&legalCurRow){
shared_slices[s_idx+s_area] = dst[d_idx+d_area];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 ){
// printf("shared_slices: addr:%d, val = %f\n",n_cols*n_rows + threadIdx.x,shared_slices[n_cols*n_rows+threadIdx.x]);
if(threadIdx.x==0||threadIdx.x==1||threadIdx.x==2){
int addr = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int addr1 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch;
int addr2 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch*2;
int daddr = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int daddr1 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch;
int daddr2 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch*2;
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr,dst[daddr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr1,dst[daddr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr2,dst[daddr2]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr,shared_slices[addr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr1,shared_slices[addr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr2,shared_slices[addr2]);
}
}
#endif
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices end!\n");
}
#endif
} |
3af61530133d0b81d29e723b4cd6d84db9ac7e11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cassert>
#include <hip/hip_fp16.h>
#include <hipcub/hipcub.hpp>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/fpgeneric.h"
#include "contrib_ops/cuda/bert/packed_attention_impl.h"
#include "contrib_ops/cuda/bert/attention_softmax.h"
#include "contrib_ops/cuda/bert/transformer_common.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/cross_attention/fmha_cross_attention.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include "contrib_ops/cuda/transformers/dump_cuda_tensor.h"
#include "contrib_ops/cuda/bert/cutlass_fmha/memory_efficient_attention.h"
#include "contrib_ops/cuda/bert/rotary_embedding_util.h"
using namespace onnxruntime::cuda;
using namespace onnxruntime::contrib::attention_softmax_cuda;
#define CHECK_CUDA(expr) CUDA_RETURN_IF_ERROR(expr)
namespace onnxruntime {
namespace contrib {
namespace cuda {
constexpr size_t kCUDAMemoryAlignment = 256;
constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
size_t GetAttentionScratchSize(
size_t element_size,
size_t batch_size,
size_t num_heads,
size_t sequence_length) {
const size_t bytes = element_size * batch_size * num_heads * sequence_length * sequence_length;
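  // Round the raw byte count up to the 256-byte alignment used when partitioning the workspace.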
return ((bytes + kCUDAMemoryAlignment - 1) / kCUDAMemoryAlignment) * kCUDAMemoryAlignment;
}
size_t GetAttentionWorkspaceSize(
size_t element_size,
size_t batch_size,
size_t num_heads,
size_t qk_head_size,
size_t v_head_size,
size_t sequence_length,
void* fused_runner,
bool use_memory_efficient_attention,
bool no_qkv_workspace) {
// Note that q, k and v might need alignment for fused attention kernels.
const size_t qkv_bytes = no_qkv_workspace ? 0 : (element_size * batch_size * num_heads * sequence_length * (qk_head_size + qk_head_size + v_head_size));
if (fused_runner != nullptr) {
return qkv_bytes;
}
#if USE_FLASH_ATTENTION
if (use_memory_efficient_attention) {
size_t fmha_buffer_bytes = 0;
if (MemoryEfficientAttentionParams::need_workspace(v_head_size, element_size == sizeof(float))) {
fmha_buffer_bytes = batch_size * sequence_length * num_heads * v_head_size * sizeof(float);
}
return qkv_bytes + fmha_buffer_bytes;
}
#else
ORT_UNUSED_PARAMETER(use_memory_efficient_attention);
#endif
return qkv_bytes + 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length);
}
// Grid: (S, B)
// Block: 256
// For unfused PackedAttention
// Input: Tx3xNxH
// Output: 3xBxNxSxH
// Where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBiasTransposeQKVPacked(
const T* input,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
const int32_t* token_offset,
int32_t token_count) {
int s = blockIdx.x;
int b = blockIdx.y;
int S = gridDim.x;
const int packing_token_idx = b * S + s;
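  // token_offset maps the packed token index back to its padded position, from which
  // the original (batch b, sequence s) coordinates are recovered below.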
const int padding_token_idx = token_offset[packing_token_idx];
b = padding_token_idx / S;
s = padding_token_idx - b * S;
input += packing_token_idx * N * (H_QK + H_QK + H_V);
int k_offset = N * H_QK;
int v_offset = N * H_QK + N * H_QK;
q += (b * N * S + s) * H_QK;
k += (b * N * S + s) * H_QK;
v += (b * N * S + s) * H_V;
if (packing_token_idx < token_count) {
for (int i = threadIdx.x; i < N * H_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = input[i] + biases[i];
k[n * S * H_QK + h] = input[i + k_offset] + biases[i + k_offset];
}
for (int i = threadIdx.x; i < N * H_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = input[i + v_offset] + biases[i + v_offset];
}
} else {
for (int i = threadIdx.x; i < N * H_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = biases[i];
k[n * S * H_QK + h] = biases[i + k_offset];
}
for (int i = threadIdx.x; i < N * H_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = biases[i + v_offset];
}
}
}
// Grid: (T)
// Block: 256
// For memory-efficient fMHA from CUTLASS. For future use; fMHA from CUTLASS is not supported yet.
// Input: Tx3xNxH
// Output: 3xTxNxH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBiasTransposeQKVPackedCutlass(
const T* input,
const T* biases,
int32_t D_QK,
int32_t D_V,
T* q,
T* k,
T* v,
int32_t token_count) {
int token_idx = blockIdx.x;
input += token_idx * (D_QK + D_QK + D_V);
q += token_idx * D_QK;
k += token_idx * D_QK;
v += token_idx * D_V;
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
q[i] = input[i] + biases[i];
k[i] = input[D_QK + i] + biases[D_QK + i];
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
v[i] = input[D_QK + D_QK + i] + biases[D_QK + D_QK + i];
}
}
}
// Grid: (T)
// Block: 256
// For fMHA from TRT
// Input: Tx3xNxH
// Output: TxNx3xH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBiasTransposeQKVPackedTRT(
const T* input,
const T* biases,
int32_t N,
int32_t H,
T* output) {
int token_idx = blockIdx.x;
int Hx3 = H * 3;
int NxH = N * H;
int NxHx2 = N * H + N * H;
int offset = token_idx * N * Hx3;
input += offset;
output += offset;
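  // Interleave per head: each head stores its H Q values, then H K values, then H V values (TxNx3xH).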
for (int i = threadIdx.x; i < N * H; i += blockDim.x) {
int n = i / H;
int h = i % H;
output[n * Hx3 + h] = input[i] + biases[i];
output[n * Hx3 + H + h] = input[i + NxH] + biases[i + NxH];
output[n * Hx3 + H + H + h] = input[i + NxHx2] + biases[i + NxHx2];
}
}
template <typename T>
void InvokeAddBiasTranspose(
const T* input, const T* biases, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat format, const int32_t* token_offset, int32_t token_count,
hipStream_t stream) {
if (format == AttentionQkvFormat::Q_K_V_BNSH) {
const dim3 grid(sequence_length, batch_size);
hipLaunchKernelGGL(( AddBiasTransposeQKVPacked<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
input,
biases,
num_heads,
qk_head_size,
v_head_size,
output,
output + batch_size * sequence_length * num_heads * qk_head_size,
output + 2 * batch_size * sequence_length * num_heads * qk_head_size,
token_offset,
token_count);
} else if (format == AttentionQkvFormat::Q_K_V_BSNH) {
const dim3 grid(token_count);
hipLaunchKernelGGL(( AddBiasTransposeQKVPackedCutlass<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
input,
biases,
num_heads * qk_head_size,
num_heads * v_head_size,
output,
output + token_count * num_heads * qk_head_size,
output + 2 * token_count * num_heads * qk_head_size,
token_count);
} else {
ORT_ENFORCE(format == AttentionQkvFormat::QKV_BSN3H);
const dim3 grid(token_count);
hipLaunchKernelGGL(( AddBiasTransposeQKVPackedTRT<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
input,
biases,
num_heads,
qk_head_size,
output);
}
}
template <typename T>
struct T4;
template <>
struct T4<float> {
using Type = float4;
};
template <>
struct T4<half> {
using Type = Half4;
};
template <typename T>
struct T2;
template <>
struct T2<float> {
using Type = float2;
};
template <>
struct T2<half> {
using Type = half2;
};
template <typename T>
void LaunchAddBiasTranspose(
const T* input, const T* biases, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat format, const int32_t* token_offset, int32_t token_count,
hipStream_t stream) {
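  // Dispatch on alignment: head sizes divisible by 4 use float4/Half4 vectors,
  // divisible by 2 use float2/half2, otherwise fall back to the scalar path.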
if (0 == (qk_head_size & 3) && 0 == (v_head_size & 3)) {
using T4Type = typename T4<T>::Type;
const int H = qk_head_size / 4;
const int H_v = v_head_size / 4;
const T4Type* input2 = reinterpret_cast<const T4Type*>(input);
const T4Type* biases2 = reinterpret_cast<const T4Type*>(biases);
T4Type* output2 = reinterpret_cast<T4Type*>(output);
InvokeAddBiasTranspose<T4Type>(
input2, biases2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
format, token_offset, token_count, stream);
} else if (0 == (qk_head_size & 1) && 0 == (v_head_size & 1)) {
using T2Type = typename T2<T>::Type;
const int H = qk_head_size / 2;
const int H_v = v_head_size / 2;
const T2Type* input2 = reinterpret_cast<const T2Type*>(input);
const T2Type* biases2 = reinterpret_cast<const T2Type*>(biases);
T2Type* output2 = reinterpret_cast<T2Type*>(output);
InvokeAddBiasTranspose<T2Type>(
input2, biases2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
format, token_offset, token_count, stream);
} else {
InvokeAddBiasTranspose<T>(
input, biases, output,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
format, token_offset, token_count, stream);
}
}
// Input: BxNxSxH
// Output: TxNxH
// where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
// Grid: T
// Block: 256
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
TransposeRemovePadding(T* target, const T* source, const int* token_offset,
const int B, const int S, const int N, const int H) {
int token_idx = blockIdx.x;
int source_idx = token_offset[token_idx];
int b = source_idx / S;
int s = source_idx - b * S;
target += token_idx * N * H;
source += b * N * S * H + s * H;
for (int i = threadIdx.x; i < N * H; i += blockDim.x) {
int n = i / H;
int h = i - n * H;
target[i] = source[n * S * H + h];
}
}
template <typename T>
Status LaunchTransposeRemovePadding(
T* output, const T* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
hipStream_t stream);
// input: [batch_size, number_heads, seq_len, head_size]
// output: [token_count, number_heads * head_size]
template <>
Status LaunchTransposeRemovePadding(
half* output, const half* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
hipStream_t stream) {
// Make sure memory is aligned to 128 bit
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
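  // Pick the widest vector type the head size allows: int4 moves 8 halves per thread,
  // int64_t moves 4, int32_t moves 2, int16_t moves 1.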
if (head_size % 8 == 0) {
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( TransposeRemovePadding<int4>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 8);
} else if (head_size % 4 == 0) {
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( TransposeRemovePadding<int64_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 4);
} else if (head_size % 2 == 0) {
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( TransposeRemovePadding<int32_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 2);
} else {
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
hipLaunchKernelGGL(( TransposeRemovePadding<int16_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size);
}
return CUDA_CALL(hipGetLastError());
}
// input: [batch_size, number_heads, seq_len, head_size]
// output: [token_count, number_heads * head_size]
template <>
Status LaunchTransposeRemovePadding(
float* output, const float* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
hipStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (head_size % 4 == 0) {
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( TransposeRemovePadding<int4>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 4);
} else if (head_size % 2 == 0) {
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( TransposeRemovePadding<int64_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 2);
} else {
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( TransposeRemovePadding<int32_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size);
}
return CUDA_CALL(hipGetLastError());
}
template <typename T>
Status FusedScaledDotProductAttention(
const hipDeviceProp_t& device_prop,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
void* fused_runner = data.fused_runner;
ORT_RETURN_IF_NOT(nullptr != fused_runner, "fused_runner cannot be NULL");
LaunchAddBiasTranspose(data.gemm_buffer, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
AttentionQkvFormat::QKV_BSN3H, data.token_offset,
parameters.token_count, stream);
FusedMHARunnerFP16v2* fused_fp16_runner = reinterpret_cast<FusedMHARunnerFP16v2*>(fused_runner);
const int S = fused_fp16_runner->getSFromMaxSeqLen(sequence_length);
fused_fp16_runner->setup(S, batch_size);
fused_fp16_runner->run(data.workspace, data.cumulative_sequence_length, data.output, stream);
return Status::OK();
}
#if USE_FLASH_ATTENTION
template <typename T>
Status FusedScaledDotProductAttentionCutlass(
const hipDeviceProp_t& device_prop,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
LaunchAddBiasTranspose(data.gemm_buffer, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
AttentionQkvFormat::Q_K_V_BSNH, data.token_offset,
parameters.token_count, stream);
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedAttention cutlass data.gemm_buffer", data.gemm_buffer, parameters.token_count, 3, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedAttention cutlass data.bias", data.bias, 1, 3 * num_heads * qk_head_size);
// Q, K and V pointers
const int model_dimension_qk = num_heads * qk_head_size;
const int model_dimension_v = num_heads * v_head_size;
const size_t elements_qk = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_qk);
const size_t elements_v = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_v);
T* qkv = data.workspace;
T* query = qkv;
T* key = query + elements_qk;
T* value = key + elements_qk;
T* accum_workspace = value + elements_v;
DUMP_TENSOR_D("PackedAttention cutlass q(BSNH)", query, parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedAttention cutlass k(BSNH)", key, parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedAttention cutlass v(BSNH)", value, parameters.token_count, num_heads * v_head_size);
DUMP_TENSOR_D("PackedAttention cutlass cumulative_sequence_length", data.cumulative_sequence_length, 1, batch_size + 1);
MemoryEfficientAttentionParams p;
p.sm = device_prop.major * 10 + device_prop.minor;
p.is_half = sizeof(T) == 2;
p.batch_size = parameters.batch_size;
p.num_heads = parameters.num_heads;
p.sequence_length = parameters.sequence_length;
p.kv_sequence_length = parameters.sequence_length;
p.qk_head_size = parameters.head_size;
p.v_head_size = parameters.v_head_size;
p.causal = false;
p.scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
p.seqlen_k_ptr = nullptr;
p.seqstart_q_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.seqstart_k_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.query = query;
p.key = key;
p.value = value;
p.attn_bias = data.relative_position_bias;
p.is_attn_bias_batched = !parameters.broadcast_res_pos_bias;
p.output = data.output;
p.workspace = MemoryEfficientAttentionParams::need_workspace(v_head_size, sizeof(T) == sizeof(float)) ? accum_workspace : nullptr;
p.stream = stream;
run_memory_efficient_attention(p);
DUMP_TENSOR("PackedAttention cutlass output", data.output, parameters.token_count, num_heads, v_head_size);
return Status::OK();
}
#endif
template <typename T>
Status UnfusedScaledDotProductAttention(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
constexpr size_t element_size = sizeof(T);
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
const int batches = batch_size * num_heads;
const int size_per_batch_q = sequence_length * qk_head_size;
const int size_per_batch_k = sequence_length * qk_head_size;
const int size_per_batch_v = sequence_length * v_head_size;
const size_t elements_q = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_q);
const size_t elements_k = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_k);
const size_t elements_v = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_v);
// Q, K and V pointers when fused attention is not used
T* qkv = data.workspace;
T* q = qkv;
T* k = q + elements_q;
T* v = k + elements_k;
LaunchAddBiasTranspose(data.gemm_buffer, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
AttentionQkvFormat::Q_K_V_BNSH, data.token_offset,
parameters.token_count, stream);
T* scaled_qk = qkv + elements_q + elements_k + elements_v;
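  // The QK^T scratch region sits right after Q, K and V in the workspace; the softmax
  // result follows it (see GetAttentionWorkspaceSize).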
// Q, K and V are ready now
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedAttention unfused gemm_buffer", data.gemm_buffer, parameters.token_count, (num_heads * (qk_head_size * 2 + v_head_size)));
DUMP_TENSOR_D("PackedAttention unfused data.workspace", data.workspace, 3 * batch_size, num_heads, sequence_length, qk_head_size);
// Compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scaled_qk: BxNxSxT
// Q: BxNxSxH, K: BxNxSxH, Q*K': BxNxSxS
float one = 1.0f;
float zero = 0.f;
float scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
hipblasSetStream(cublas, stream);
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, HIPBLAS_OP_T, HIPBLAS_OP_N,
sequence_length, sequence_length, qk_head_size,
&scale,
k, qk_head_size, sequence_length * qk_head_size,
q, qk_head_size, sequence_length * qk_head_size,
&zero,
scaled_qk, sequence_length, sequence_length * sequence_length,
batches, device_prop));
DUMP_TENSOR_D("PackedAttention unfused QK", scaled_qk, batch_size * num_heads, sequence_length, sequence_length);
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads,
sequence_length);
T* attention_score = scaled_qk + (bytes / element_size);
// Apply softmax and store result R to attention_score: BxNxSxS
ORT_RETURN_IF_ERROR(ComputeSoftmaxWithCumSeqLength<T>(
scaled_qk,
data.relative_position_bias,
parameters.broadcast_res_pos_bias,
data.cumulative_sequence_length,
batch_size,
sequence_length,
num_heads,
attention_score, stream));
DUMP_TENSOR_D("PackedAttention unfused Softmax", attention_score, batch_size * num_heads, sequence_length, sequence_length);
// compute R*V (as V*R), and store in temp_output (space used by Q): BxNxSxH_v
T* temp_output = qkv;
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, HIPBLAS_OP_N, HIPBLAS_OP_N,
v_head_size, sequence_length, sequence_length,
&one, v, v_head_size, sequence_length * v_head_size,
attention_score, sequence_length, sequence_length * sequence_length,
&zero, temp_output, v_head_size, sequence_length * v_head_size, batches, device_prop));
// Temp_output is BxNxSxH_v, transpose and remove padding to output token_countxNxH_v
Status result = LaunchTransposeRemovePadding(
data.output, temp_output,
data.token_offset, parameters.token_count,
batch_size, sequence_length, num_heads, v_head_size,
stream);
DUMP_TENSOR("PackedAttention unfused output", data.output, parameters.token_count, num_heads, v_head_size);
return result;
}
template <typename T>
Status QkvToContext(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
void* fused_runner = data.fused_runner;
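  // Dispatch order: TRT fused runner if provided, then the CUTLASS memory-efficient path,
  // otherwise the unfused reference implementation.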
if (nullptr != fused_runner) {
return FusedScaledDotProductAttention<T>(device_prop, stream, parameters, data);
}
#if USE_FLASH_ATTENTION
if (data.use_memory_efficient_attention) {
return FusedScaledDotProductAttentionCutlass(device_prop, stream, parameters, data);
}
#endif
return UnfusedScaledDotProductAttention<T>(device_prop, cublas, stream, parameters, data);
}
template Status QkvToContext<float>(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<float>& data);
template Status QkvToContext<half>(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<half>& data);
template Status LaunchTransposeRemovePadding<float>(
float* output, const float* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
hipStream_t stream);
template Status LaunchTransposeRemovePadding<half>(
half* output, const half* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
hipStream_t stream);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 3af61530133d0b81d29e723b4cd6d84db9ac7e11.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cassert>
#include <cuda_fp16.h>
#include <cub/cub.cuh>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/fpgeneric.h"
#include "contrib_ops/cuda/bert/packed_attention_impl.h"
#include "contrib_ops/cuda/bert/attention_softmax.h"
#include "contrib_ops/cuda/bert/transformer_common.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/cross_attention/fmha_cross_attention.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include "contrib_ops/cuda/transformers/dump_cuda_tensor.h"
#include "contrib_ops/cuda/bert/cutlass_fmha/memory_efficient_attention.h"
#include "contrib_ops/cuda/bert/rotary_embedding_util.h"
using namespace onnxruntime::cuda;
using namespace onnxruntime::contrib::attention_softmax_cuda;
#define CHECK_CUDA(expr) CUDA_RETURN_IF_ERROR(expr)
namespace onnxruntime {
namespace contrib {
namespace cuda {
constexpr size_t kCUDAMemoryAlignment = 256;
constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
size_t GetAttentionScratchSize(
size_t element_size,
size_t batch_size,
size_t num_heads,
size_t sequence_length) {
const size_t bytes = element_size * batch_size * num_heads * sequence_length * sequence_length;
return ((bytes + kCUDAMemoryAlignment - 1) / kCUDAMemoryAlignment) * kCUDAMemoryAlignment;
}
size_t GetAttentionWorkspaceSize(
size_t element_size,
size_t batch_size,
size_t num_heads,
size_t qk_head_size,
size_t v_head_size,
size_t sequence_length,
void* fused_runner,
bool use_memory_efficient_attention,
bool no_qkv_workspace) {
// Note that q, k and v might need alignment for fused attention kernels.
const size_t qkv_bytes = no_qkv_workspace ? 0 : (element_size * batch_size * num_heads * sequence_length * (qk_head_size + qk_head_size + v_head_size));
if (fused_runner != nullptr) {
return qkv_bytes;
}
#if USE_FLASH_ATTENTION
if (use_memory_efficient_attention) {
size_t fmha_buffer_bytes = 0;
if (MemoryEfficientAttentionParams::need_workspace(v_head_size, element_size == sizeof(float))) {
fmha_buffer_bytes = batch_size * sequence_length * num_heads * v_head_size * sizeof(float);
}
return qkv_bytes + fmha_buffer_bytes;
}
#else
ORT_UNUSED_PARAMETER(use_memory_efficient_attention);
#endif
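  // Unfused fallback: two extra scratch buffers, one for QK^T and one for the softmax output.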
return qkv_bytes + 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length);
}
// Grid: (S, B)
// Block: 256
// For unfused PackedAttention
// Input: Tx3xNxH
// Output: 3xBxNxSxH
// Where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBiasTransposeQKVPacked(
const T* input,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
const int32_t* token_offset,
int32_t token_count) {
int s = blockIdx.x;
int b = blockIdx.y;
int S = gridDim.x;
const int packing_token_idx = b * S + s;
const int padding_token_idx = token_offset[packing_token_idx];
b = padding_token_idx / S;
s = padding_token_idx - b * S;
input += packing_token_idx * N * (H_QK + H_QK + H_V);
int k_offset = N * H_QK;
int v_offset = N * H_QK + N * H_QK;
q += (b * N * S + s) * H_QK;
k += (b * N * S + s) * H_QK;
v += (b * N * S + s) * H_V;
if (packing_token_idx < token_count) {
for (int i = threadIdx.x; i < N * H_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = input[i] + biases[i];
k[n * S * H_QK + h] = input[i + k_offset] + biases[i + k_offset];
}
for (int i = threadIdx.x; i < N * H_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = input[i + v_offset] + biases[i + v_offset];
}
} else {
for (int i = threadIdx.x; i < N * H_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = biases[i];
k[n * S * H_QK + h] = biases[i + k_offset];
}
for (int i = threadIdx.x; i < N * H_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = biases[i + v_offset];
}
}
}
// Grid: (T)
// Block: 256
// For memory-efficient fMHA from CUTLASS. For future use; fMHA from CUTLASS is not supported yet.
// Input: Tx3xNxH
// Output: 3xTxNxH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBiasTransposeQKVPackedCutlass(
const T* input,
const T* biases,
int32_t D_QK,
int32_t D_V,
T* q,
T* k,
T* v,
int32_t token_count) {
int token_idx = blockIdx.x;
input += token_idx * (D_QK + D_QK + D_V);
q += token_idx * D_QK;
k += token_idx * D_QK;
v += token_idx * D_V;
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
q[i] = input[i] + biases[i];
k[i] = input[D_QK + i] + biases[D_QK + i];
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
v[i] = input[D_QK + D_QK + i] + biases[D_QK + D_QK + i];
}
}
}
// Grid: (T)
// Block: 256
// For fMHA from TRT
// Input: Tx3xNxH
// Output: TxNx3xH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBiasTransposeQKVPackedTRT(
const T* input,
const T* biases,
int32_t N,
int32_t H,
T* output) {
int token_idx = blockIdx.x;
int Hx3 = H * 3;
int NxH = N * H;
int NxHx2 = N * H + N * H;
int offset = token_idx * N * Hx3;
input += offset;
output += offset;
for (int i = threadIdx.x; i < N * H; i += blockDim.x) {
int n = i / H;
int h = i % H;
output[n * Hx3 + h] = input[i] + biases[i];
output[n * Hx3 + H + h] = input[i + NxH] + biases[i + NxH];
output[n * Hx3 + H + H + h] = input[i + NxHx2] + biases[i + NxHx2];
}
}
template <typename T>
void InvokeAddBiasTranspose(
const T* input, const T* biases, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat format, const int32_t* token_offset, int32_t token_count,
cudaStream_t stream) {
if (format == AttentionQkvFormat::Q_K_V_BNSH) {
const dim3 grid(sequence_length, batch_size);
AddBiasTransposeQKVPacked<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
input,
biases,
num_heads,
qk_head_size,
v_head_size,
output,
output + batch_size * sequence_length * num_heads * qk_head_size,
output + 2 * batch_size * sequence_length * num_heads * qk_head_size,
token_offset,
token_count);
} else if (format == AttentionQkvFormat::Q_K_V_BSNH) {
const dim3 grid(token_count);
AddBiasTransposeQKVPackedCutlass<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
input,
biases,
num_heads * qk_head_size,
num_heads * v_head_size,
output,
output + token_count * num_heads * qk_head_size,
output + 2 * token_count * num_heads * qk_head_size,
token_count);
} else {
ORT_ENFORCE(format == AttentionQkvFormat::QKV_BSN3H);
const dim3 grid(token_count);
AddBiasTransposeQKVPackedTRT<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
input,
biases,
num_heads,
qk_head_size,
output);
}
}
template <typename T>
struct T4;
template <>
struct T4<float> {
using Type = float4;
};
template <>
struct T4<half> {
using Type = Half4;
};
template <typename T>
struct T2;
template <>
struct T2<float> {
using Type = float2;
};
template <>
struct T2<half> {
using Type = half2;
};
template <typename T>
void LaunchAddBiasTranspose(
const T* input, const T* biases, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat format, const int32_t* token_offset, int32_t token_count,
cudaStream_t stream) {
if (0 == (qk_head_size & 3) && 0 == (v_head_size & 3)) {
using T4Type = typename T4<T>::Type;
const int H = qk_head_size / 4;
const int H_v = v_head_size / 4;
const T4Type* input2 = reinterpret_cast<const T4Type*>(input);
const T4Type* biases2 = reinterpret_cast<const T4Type*>(biases);
T4Type* output2 = reinterpret_cast<T4Type*>(output);
InvokeAddBiasTranspose<T4Type>(
input2, biases2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
format, token_offset, token_count, stream);
} else if (0 == (qk_head_size & 1) && 0 == (v_head_size & 1)) {
using T2Type = typename T2<T>::Type;
const int H = qk_head_size / 2;
const int H_v = v_head_size / 2;
const T2Type* input2 = reinterpret_cast<const T2Type*>(input);
const T2Type* biases2 = reinterpret_cast<const T2Type*>(biases);
T2Type* output2 = reinterpret_cast<T2Type*>(output);
InvokeAddBiasTranspose<T2Type>(
input2, biases2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
format, token_offset, token_count, stream);
} else {
InvokeAddBiasTranspose<T>(
input, biases, output,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
format, token_offset, token_count, stream);
}
}
// Input: BxNxSxH
// Output: TxNxH
// where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
// Grid: T
// Block: 256
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
TransposeRemovePadding(T* target, const T* source, const int* token_offset,
const int B, const int S, const int N, const int H) {
int token_idx = blockIdx.x;
int source_idx = token_offset[token_idx];
int b = source_idx / S;
int s = source_idx - b * S;
target += token_idx * N * H;
source += b * N * S * H + s * H;
for (int i = threadIdx.x; i < N * H; i += blockDim.x) {
int n = i / H;
int h = i - n * H;
target[i] = source[n * S * H + h];
}
}
template <typename T>
Status LaunchTransposeRemovePadding(
T* output, const T* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
cudaStream_t stream);
// input: [batch_size, number_heads, seq_len, head_size]
// output: [token_count, number_heads * head_size]
template <>
Status LaunchTransposeRemovePadding(
half* output, const half* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
cudaStream_t stream) {
// Make sure memory is aligned to 128 bit
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (head_size % 8 == 0) {
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
TransposeRemovePadding<int4><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 8);
} else if (head_size % 4 == 0) {
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
TransposeRemovePadding<int64_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 4);
} else if (head_size % 2 == 0) {
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
TransposeRemovePadding<int32_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 2);
} else {
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
TransposeRemovePadding<int16_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size);
}
return CUDA_CALL(cudaGetLastError());
}
// input: [batch_size, number_heads, seq_len, head_size]
// output: [token_count, number_heads * head_size]
template <>
Status LaunchTransposeRemovePadding(
float* output, const float* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
cudaStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (head_size % 4 == 0) {
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
TransposeRemovePadding<int4><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 4);
} else if (head_size % 2 == 0) {
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
TransposeRemovePadding<int64_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size / 2);
} else {
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
TransposeRemovePadding<int32_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, batch_size, seq_len, number_heads, head_size);
}
return CUDA_CALL(cudaGetLastError());
}
template <typename T>
Status FusedScaledDotProductAttention(
const cudaDeviceProp& device_prop,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
void* fused_runner = data.fused_runner;
ORT_RETURN_IF_NOT(nullptr != fused_runner, "fused_runner cannot be NULL");
LaunchAddBiasTranspose(data.gemm_buffer, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
AttentionQkvFormat::QKV_BSN3H, data.token_offset,
parameters.token_count, stream);
FusedMHARunnerFP16v2* fused_fp16_runner = reinterpret_cast<FusedMHARunnerFP16v2*>(fused_runner);
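  // Map the requested sequence length to a kernel-supported value, configure the runner,
  // then launch it on the packed QKV workspace.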
const int S = fused_fp16_runner->getSFromMaxSeqLen(sequence_length);
fused_fp16_runner->setup(S, batch_size);
fused_fp16_runner->run(data.workspace, data.cumulative_sequence_length, data.output, stream);
return Status::OK();
}
#if USE_FLASH_ATTENTION
template <typename T>
Status FusedScaledDotProductAttentionCutlass(
const cudaDeviceProp& device_prop,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
LaunchAddBiasTranspose(data.gemm_buffer, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
AttentionQkvFormat::Q_K_V_BSNH, data.token_offset,
parameters.token_count, stream);
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedAttention cutlass data.gemm_buffer", data.gemm_buffer, parameters.token_count, 3, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedAttention cutlass data.bias", data.bias, 1, 3 * num_heads * qk_head_size);
// Q, K and V pointers
const int model_dimension_qk = num_heads * qk_head_size;
const int model_dimension_v = num_heads * v_head_size;
const size_t elements_qk = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_qk);
const size_t elements_v = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_v);
T* qkv = data.workspace;
T* query = qkv;
T* key = query + elements_qk;
T* value = key + elements_qk;
T* accum_workspace = value + elements_v;
DUMP_TENSOR_D("PackedAttention cutlass q(BSNH)", query, parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedAttention cutlass k(BSNH)", key, parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedAttention cutlass v(BSNH)", value, parameters.token_count, num_heads * v_head_size);
DUMP_TENSOR_D("PackedAttention cutlass cumulative_sequence_length", data.cumulative_sequence_length, 1, batch_size + 1);
MemoryEfficientAttentionParams p;
p.sm = device_prop.major * 10 + device_prop.minor;
p.is_half = sizeof(T) == 2;
p.batch_size = parameters.batch_size;
p.num_heads = parameters.num_heads;
p.sequence_length = parameters.sequence_length;
p.kv_sequence_length = parameters.sequence_length;
p.qk_head_size = parameters.head_size;
p.v_head_size = parameters.v_head_size;
p.causal = false;
p.scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
p.seqlen_k_ptr = nullptr;
p.seqstart_q_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.seqstart_k_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.query = query;
p.key = key;
p.value = value;
p.attn_bias = data.relative_position_bias;
p.is_attn_bias_batched = !parameters.broadcast_res_pos_bias;
p.output = data.output;
p.workspace = MemoryEfficientAttentionParams::need_workspace(v_head_size, sizeof(T) == sizeof(float)) ? accum_workspace : nullptr;
p.stream = stream;
run_memory_efficient_attention(p);
DUMP_TENSOR("PackedAttention cutlass output", data.output, parameters.token_count, num_heads, v_head_size);
return Status::OK();
}
#endif
template <typename T>
Status UnfusedScaledDotProductAttention(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
constexpr size_t element_size = sizeof(T);
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
const int batches = batch_size * num_heads;
const int size_per_batch_q = sequence_length * qk_head_size;
const int size_per_batch_k = sequence_length * qk_head_size;
const int size_per_batch_v = sequence_length * v_head_size;
const size_t elements_q = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_q);
const size_t elements_k = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_k);
const size_t elements_v = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_v);
// Q, K and V pointers when fused attention is not used
T* qkv = data.workspace;
T* q = qkv;
T* k = q + elements_q;
T* v = k + elements_k;
LaunchAddBiasTranspose(data.gemm_buffer, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
AttentionQkvFormat::Q_K_V_BNSH, data.token_offset,
parameters.token_count, stream);
T* scaled_qk = qkv + elements_q + elements_k + elements_v;
// Q, K and V are ready now
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedAttention unfused gemm_buffer", data.gemm_buffer, parameters.token_count, (num_heads * (qk_head_size * 2 + v_head_size)));
DUMP_TENSOR_D("PackedAttention unfused data.workspace", data.workspace, 3 * batch_size, num_heads, sequence_length, qk_head_size);
// Compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scaled_qk: BxNxSxT
// Q: BxNxSxH, K: BxNxSxH, Q*K': BxNxSxS
float one = 1.0f;
float zero = 0.f;
float scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
cublasSetStream(cublas, stream);
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, CUBLAS_OP_T, CUBLAS_OP_N,
sequence_length, sequence_length, qk_head_size,
&scale,
k, qk_head_size, sequence_length * qk_head_size,
q, qk_head_size, sequence_length * qk_head_size,
&zero,
scaled_qk, sequence_length, sequence_length * sequence_length,
batches, device_prop));
DUMP_TENSOR_D("PackedAttention unfused QK", scaled_qk, batch_size * num_heads, sequence_length, sequence_length);
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads,
sequence_length);
T* attention_score = scaled_qk + (bytes / element_size);
// Apply softmax and store result R to attention_score: BxNxSxS
ORT_RETURN_IF_ERROR(ComputeSoftmaxWithCumSeqLength<T>(
scaled_qk,
data.relative_position_bias,
parameters.broadcast_res_pos_bias,
data.cumulative_sequence_length,
batch_size,
sequence_length,
num_heads,
attention_score, stream));
DUMP_TENSOR_D("PackedAttention unfused Softmax", attention_score, batch_size * num_heads, sequence_length, sequence_length);
// compute R*V (as V*R), and store in temp_output (space used by Q): BxNxSxH_v
T* temp_output = qkv;
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, CUBLAS_OP_N, CUBLAS_OP_N,
v_head_size, sequence_length, sequence_length,
&one, v, v_head_size, sequence_length * v_head_size,
attention_score, sequence_length, sequence_length * sequence_length,
&zero, temp_output, v_head_size, sequence_length * v_head_size, batches, device_prop));
// Temp_output is BxNxSxH_v, transpose and remove padding to output token_countxNxH_v
Status result = LaunchTransposeRemovePadding(
data.output, temp_output,
data.token_offset, parameters.token_count,
batch_size, sequence_length, num_heads, v_head_size,
stream);
DUMP_TENSOR("PackedAttention unfused output", data.output, parameters.token_count, num_heads, v_head_size);
return result;
}
template <typename T>
Status QkvToContext(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<T>& data) {
void* fused_runner = data.fused_runner;
if (nullptr != fused_runner) {
return FusedScaledDotProductAttention<T>(device_prop, stream, parameters, data);
}
#if USE_FLASH_ATTENTION
if (data.use_memory_efficient_attention) {
return FusedScaledDotProductAttentionCutlass(device_prop, stream, parameters, data);
}
#endif
return UnfusedScaledDotProductAttention<T>(device_prop, cublas, stream, parameters, data);
}
template Status QkvToContext<float>(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<float>& data);
template Status QkvToContext<half>(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedAttentionData<half>& data);
template Status LaunchTransposeRemovePadding<float>(
float* output, const float* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
cudaStream_t stream);
template Status LaunchTransposeRemovePadding<half>(
half* output, const half* input,
const int* token_offset, const int token_count,
const int batch_size, const int seq_len, const int number_heads, const int head_size,
cudaStream_t stream);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
0472b93c0405171c059f5a3a6a9f038e20f6319d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/core/math/math.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/pinned_vector.h"
#include "saiga/cuda/reduce.h"
#include <iostream>
#include <vector>
using namespace Saiga;
using Saiga::ArrayView;
using Saiga::CUDA::ThreadInfo;
//#define LECTURE
#ifdef LECTURE
template <typename T>
__global__ static void warpReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
}
static void reduceTest()
{
int N = 823674;
using T = int;
Saiga::pinned_vector<T> h_data(N);
for (auto& f : h_data)
{
f = rand() % 10;
}
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<T> output(1);
{
int n = 32;
// Reduce only the first n elements
thrust::device_vector<T> data(d_data.begin(), d_data.begin() + n);
hipLaunchKernelGGL(( warpReduceSimple<T>), dim3(1), dim3(n), 0, 0, data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(data.begin(), data.end());
std::cout << "warpReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
}
int main(int argc, char* argv[])
{
reduceTest();
std::cout << "Done." << std::endl;
}
#else
template <typename T>
__device__ inline T warpReduceSum(T val)
{
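    // Shuffle-based tree reduction: after the loop, lane 0 holds the sum of lanes 0-31.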
# pragma unroll
for (int offset = 16; offset > 0; offset /= 2)
{
auto v = Saiga::CUDA::shfl_down(val, offset);
val = val + v;
}
return val;
}
template <typename T>
__global__ static void warpReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
auto v = data[ti.thread_id];
v = warpReduceSum(v);
if (ti.thread_id == 0) output[0] = v;
}
template <typename T>
__device__ inline T blockReduceSum(T val, T& blockSum)
{
int lane = threadIdx.x & (SAIGA_WARP_SIZE - 1);
// Each warp reduces with registers
val = warpReduceSum(val);
// Init shared memory
if (threadIdx.x == 0) blockSum = T(0);
__syncthreads();
// The first thread in each warp writes to smem
if (lane == 0)
{
atomicAdd(&blockSum, val);
}
__syncthreads();
// The first thread in this block has the result
// Optional: remove if so that every thread has the result
if (threadIdx.x == 0) val = blockSum;
return val;
}
template <typename T>
__global__ static void blockReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
__shared__ T blockSum;
auto v = data[ti.thread_id];
v = blockReduceSum(v, blockSum);
if (ti.local_thread_id == 0) output[0] = v;
}
template <typename T>
__global__ static void globalReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
__shared__ T blockSum;
    // All threads need to participate
// -> reduce a 0 for out-of-bounds threads
auto v = ti.thread_id >= data.size() ? 0 : data[ti.thread_id];
v = blockReduceSum(v, blockSum);
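    // One atomicAdd per block folds the per-block partial sums into the single global result.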
if (ti.local_thread_id == 0) atomicAdd(output.data(), v);
}
struct Particle
{
vec3 position;
float radius = 0;
};
struct MaxRadius
{
HD Particle operator()(const Particle& p1, const Particle& p2) { return p1.radius < p2.radius ? p2 : p1; }
};
static void reduceTest()
{
int N = 823674;
using T = int;
Saiga::pinned_vector<T> h_data(N);
for (auto& f : h_data)
{
f = rand() % 10;
}
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<T> output(1);
{
int n = 32;
// Reduce only the first n elements
thrust::device_vector<T> data(d_data.begin(), d_data.begin() + n);
hipLaunchKernelGGL(( warpReduceSimple<T>), dim3(1), dim3(n), 0, 0, data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(data.begin(), data.end());
std::cout << "warpReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
{
int n = 256;
// Reduce only the first n elements
thrust::device_vector<T> data(d_data.begin(), d_data.begin() + n);
hipLaunchKernelGGL(( blockReduceSimple<T>), dim3(1), dim3(n), 0, 0, data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(data.begin(), data.end());
std::cout << "blockReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
{
// Reduce everything
output[0] = 0;
globalReduceSimple<T><<<THREAD_BLOCK(N, 128)>>>(d_data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(d_data.begin(), d_data.end());
std::cout << "globalReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
{
// thrust::reduce with a custom reduce operator
// Here: Finding the particle with the largest radius
thrust::device_vector<Particle> particles(100000);
Particle test;
test.radius = 12314;
particles[100] = test;
Particle p = thrust::reduce(particles.begin(), particles.end(), Particle(), MaxRadius());
std::cout << "Max radius = " << p.radius << std::endl;
SAIGA_ASSERT(test.radius == p.radius);
}
}
int main(int argc, char* argv[])
{
reduceTest();
std::cout << "Done." << std::endl;
}
#endif
| 0472b93c0405171c059f5a3a6a9f038e20f6319d.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/core/math/math.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/pinned_vector.h"
#include "saiga/cuda/reduce.h"
#include <iostream>
#include <vector>
using namespace Saiga;
using Saiga::ArrayView;
using Saiga::CUDA::ThreadInfo;
//#define LECTURE
#ifdef LECTURE
template <typename T>
__global__ static void warpReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
}
static void reduceTest()
{
int N = 823674;
using T = int;
Saiga::pinned_vector<T> h_data(N);
for (auto& f : h_data)
{
f = rand() % 10;
}
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<T> output(1);
{
int n = 32;
// Reduce only the first n elements
thrust::device_vector<T> data(d_data.begin(), d_data.begin() + n);
warpReduceSimple<T><<<1, n>>>(data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(data.begin(), data.end());
std::cout << "warpReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
}
int main(int argc, char* argv[])
{
reduceTest();
std::cout << "Done." << std::endl;
}
#else
template <typename T>
__device__ inline T warpReduceSum(T val)
{
# pragma unroll
for (int offset = 16; offset > 0; offset /= 2)
{
auto v = Saiga::CUDA::shfl_down(val, offset);
val = val + v;
}
return val;
}
template <typename T>
__global__ static void warpReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
auto v = data[ti.thread_id];
v = warpReduceSum(v);
if (ti.thread_id == 0) output[0] = v;
}
template <typename T>
__device__ inline T blockReduceSum(T val, T& blockSum)
{
int lane = threadIdx.x & (SAIGA_WARP_SIZE - 1);
// Each warp reduces with registers
val = warpReduceSum(val);
// Init shared memory
if (threadIdx.x == 0) blockSum = T(0);
__syncthreads();
// The first thread in each warp writes to smem
if (lane == 0)
{
atomicAdd(&blockSum, val);
}
__syncthreads();
// The first thread in this block has the result
// Optional: remove if so that every thread has the result
if (threadIdx.x == 0) val = blockSum;
return val;
}
template <typename T>
__global__ static void blockReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
if (ti.thread_id >= data.size()) return;
__shared__ T blockSum;
auto v = data[ti.thread_id];
v = blockReduceSum(v, blockSum);
if (ti.local_thread_id == 0) output[0] = v;
}
template <typename T>
__global__ static void globalReduceSimple(ArrayView<T> data, ArrayView<T> output)
{
ThreadInfo<> ti;
__shared__ T blockSum;
    // All threads need to participate
// -> reduce a 0 for out-of-bounds threads
auto v = ti.thread_id >= data.size() ? 0 : data[ti.thread_id];
v = blockReduceSum(v, blockSum);
if (ti.local_thread_id == 0) atomicAdd(output.data(), v);
}
struct Particle
{
vec3 position;
float radius = 0;
};
struct MaxRadius
{
HD Particle operator()(const Particle& p1, const Particle& p2) { return p1.radius < p2.radius ? p2 : p1; }
};
static void reduceTest()
{
int N = 823674;
using T = int;
Saiga::pinned_vector<T> h_data(N);
for (auto& f : h_data)
{
f = rand() % 10;
}
thrust::device_vector<T> d_data = h_data;
thrust::device_vector<T> output(1);
{
int n = 32;
// Reduce only the first n elements
thrust::device_vector<T> data(d_data.begin(), d_data.begin() + n);
warpReduceSimple<T><<<1, n>>>(data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(data.begin(), data.end());
std::cout << "warpReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
{
int n = 256;
// Reduce only the first n elements
thrust::device_vector<T> data(d_data.begin(), d_data.begin() + n);
blockReduceSimple<T><<<1, n>>>(data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(data.begin(), data.end());
std::cout << "blockReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
{
// Reduce everything
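        // The accumulator must start at zero because the kernel adds per-block sums into it atomically.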
output[0] = 0;
globalReduceSimple<T><<<THREAD_BLOCK(N, 128)>>>(d_data, output);
// Validate output with thrust::reduce
T res = output[0];
T tres = thrust::reduce(d_data.begin(), d_data.end());
std::cout << "globalReduceSimple=" << res << ", thrust::reduce=" << tres << std::endl;
SAIGA_ASSERT(res == tres);
}
{
// thrust::reduce with a custom reduce operator
// Here: Finding the particle with the largest radius
thrust::device_vector<Particle> particles(100000);
Particle test;
test.radius = 12314;
particles[100] = test;
Particle p = thrust::reduce(particles.begin(), particles.end(), Particle(), MaxRadius());
std::cout << "Max radius = " << p.radius << std::endl;
SAIGA_ASSERT(test.radius == p.radius);
}
}
int main(int argc, char* argv[])
{
reduceTest();
std::cout << "Done." << std::endl;
}
#endif
|
970ef4b204a918a879549453f97902e53ffdb466.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/sequence_expand_as_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using LoDTensor = framework::LoDTensor;
template <typename T>
static __global__ void sequence_expand_as_kernel(const T *in_data,
const size_t *expand_offset,
const size_t src_hight,
const size_t src_widht,
T *out_data) {
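  // One block per source row (grid-stride) and one thread per column (block-stride);
  // each source row is replicated span = expand_offset[h+1] - expand_offset[h] times.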
for (int h_id = blockIdx.x; h_id < src_hight; h_id += gridDim.x) {
int span = expand_offset[h_id + 1] - expand_offset[h_id];
if (span == 0) continue;
const T *src = in_data + h_id * src_widht;
for (int w_id = threadIdx.x; w_id < src_widht; w_id += blockDim.x) {
T ele = src[w_id];
int offset = expand_offset[h_id] * src_widht;
for (int k = 0; k < span; ++k) {
out_data[offset + k * src_widht + w_id] = ele;
}
}
}
}
template <typename T>
static __global__ void sequence_expand_as_grad_kernel(
const T *dout_data, const size_t *expand_offset, const size_t dst_hight,
const size_t dst_width, T *dx_data) {
for (int h_id = blockIdx.x; h_id < dst_hight; h_id += gridDim.x) {
T *dst = dx_data + h_id * dst_width;
int span = expand_offset[h_id + 1] - expand_offset[h_id];
for (int w_id = threadIdx.x; w_id < dst_width; w_id += blockDim.x) {
T result = 0;
for (int k = 0; k < span; ++k) {
int offset = (expand_offset[h_id] + k) * dst_width;
const T *src = dout_data + offset;
result += src[w_id];
}
dst[w_id] = result;
}
}
}
template <typename T>
struct SequenceExpandFunctor<platform::CUDADeviceContext, T> {
void operator()(
const platform::CUDADeviceContext &context, const LoDTensor &x,
const framework::Vector<size_t> &ref_lod, /*expand referenced lod*/
LoDTensor *out) {
int hight = x.dims()[0];
int width = framework::product(x.dims()) / hight;
const int kThreadsPerBlock = 1024;
int thread_x = kThreadsPerBlock;
if (width < kThreadsPerBlock) { // block_cols is aligned by 32.
thread_x = ((width + 31) >> 5) << 5;
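      // e.g. width = 100 gives thread_x = ((100 + 31) >> 5) << 5 = 128, i.e. width rounded up
      // to the next multiple of the 32-thread warp size.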
}
int max_threads = context.GetMaxPhysicalThreadCount();
int block_x = ::max(max_threads / thread_x, 1);
dim3 block_size(thread_x);
dim3 grid_size(block_x);
hipLaunchKernelGGL(( sequence_expand_as_kernel), dim3(grid_size), dim3(block_size), 0, context.stream(),
x.data<T>(), ref_lod.CUDAData(context.GetPlace()), hight, width,
out->mutable_data<T>(context.GetPlace()));
}
};
template <typename T>
struct SequenceExpandAsGradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext &context,
const LoDTensor &dout,
const framework::Vector<size_t> &ref_lod, /*expand based lod*/
LoDTensor *dx) {
int hight = dx->dims()[0];
int width = framework::product(dx->dims()) / hight;
const int kThreadsPerBlock = 1024;
int thread_x = kThreadsPerBlock;
if (width < kThreadsPerBlock) { // block_cols is aligned by 32.
thread_x = ((width + 31) >> 5) << 5;
}
int max_threads = context.GetMaxPhysicalThreadCount();
int block_x = ::max(max_threads / thread_x, 1);
dim3 block_size(thread_x);
dim3 grid_size(block_x);
hipLaunchKernelGGL(( sequence_expand_as_grad_kernel), dim3(grid_size), dim3(block_size), 0,
context.stream(),
dout.data<T>(), ref_lod.CUDAData(context.GetPlace()), hight, width,
dx->mutable_data<T>(context.GetPlace()));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
sequence_expand_as,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, double>,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
sequence_expand_as_grad,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext,
double>,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext,
int64_t>);
| 970ef4b204a918a879549453f97902e53ffdb466.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/sequence_expand_as_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using LoDTensor = framework::LoDTensor;
template <typename T>
static __global__ void sequence_expand_as_kernel(const T *in_data,
const size_t *expand_offset,
const size_t src_hight,
const size_t src_widht,
T *out_data) {
for (int h_id = blockIdx.x; h_id < src_hight; h_id += gridDim.x) {
int span = expand_offset[h_id + 1] - expand_offset[h_id];
if (span == 0) continue;
const T *src = in_data + h_id * src_widht;
for (int w_id = threadIdx.x; w_id < src_widht; w_id += blockDim.x) {
T ele = src[w_id];
int offset = expand_offset[h_id] * src_widht;
for (int k = 0; k < span; ++k) {
out_data[offset + k * src_widht + w_id] = ele;
}
}
}
}
template <typename T>
static __global__ void sequence_expand_as_grad_kernel(
const T *dout_data, const size_t *expand_offset, const size_t dst_hight,
const size_t dst_width, T *dx_data) {
for (int h_id = blockIdx.x; h_id < dst_hight; h_id += gridDim.x) {
T *dst = dx_data + h_id * dst_width;
int span = expand_offset[h_id + 1] - expand_offset[h_id];
for (int w_id = threadIdx.x; w_id < dst_width; w_id += blockDim.x) {
T result = 0;
for (int k = 0; k < span; ++k) {
int offset = (expand_offset[h_id] + k) * dst_width;
const T *src = dout_data + offset;
result += src[w_id];
}
dst[w_id] = result;
}
}
}
template <typename T>
struct SequenceExpandFunctor<platform::CUDADeviceContext, T> {
void operator()(
const platform::CUDADeviceContext &context, const LoDTensor &x,
const framework::Vector<size_t> &ref_lod, /*expand referenced lod*/
LoDTensor *out) {
int hight = x.dims()[0];
int width = framework::product(x.dims()) / hight;
const int kThreadsPerBlock = 1024;
int thread_x = kThreadsPerBlock;
if (width < kThreadsPerBlock) { // block_cols is aligned by 32.
thread_x = ((width + 31) >> 5) << 5;
}
int max_threads = context.GetMaxPhysicalThreadCount();
int block_x = std::max(max_threads / thread_x, 1);
dim3 block_size(thread_x);
dim3 grid_size(block_x);
sequence_expand_as_kernel<<<grid_size, block_size, 0, context.stream()>>>(
x.data<T>(), ref_lod.CUDAData(context.GetPlace()), hight, width,
out->mutable_data<T>(context.GetPlace()));
}
};
template <typename T>
struct SequenceExpandAsGradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext &context,
const LoDTensor &dout,
const framework::Vector<size_t> &ref_lod, /*expand based lod*/
LoDTensor *dx) {
int hight = dx->dims()[0];
int width = framework::product(dx->dims()) / hight;
const int kThreadsPerBlock = 1024;
int thread_x = kThreadsPerBlock;
if (width < kThreadsPerBlock) { // block_cols is aligned by 32.
thread_x = ((width + 31) >> 5) << 5;
}
int max_threads = context.GetMaxPhysicalThreadCount();
int block_x = std::max(max_threads / thread_x, 1);
dim3 block_size(thread_x);
dim3 grid_size(block_x);
sequence_expand_as_grad_kernel<<<grid_size, block_size, 0,
context.stream()>>>(
dout.data<T>(), ref_lod.CUDAData(context.GetPlace()), hight, width,
dx->mutable_data<T>(context.GetPlace()));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
sequence_expand_as,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, double>,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandAsKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
sequence_expand_as_grad,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext,
double>,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext, int>,
ops::SequenceExpandAsGradKernel<paddle::platform::CUDADeviceContext,
int64_t>);
|
weighted_sum.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <vector>
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void weightedSumCudaForwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> result,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = result.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Parallelize over each feature in each pixel in images of size H * W,
// for each image in the batch of size batch_size
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
// Accumulate the values
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(&result[batch][ch][j][i], features[ch][n_idx] * alpha);
}
}
}
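// A minimal sketch of the optimization suggested in the TODO above: because each pid maps to a
// single output element (batch, ch, j, i), the K contributions could be accumulated in a register
// and written with one atomicAdd per thread, roughly (assuming the same accessors as above):
//
//   float acc = 0.0f;
//   for (int k = 0; k < points_idx.size(1); ++k) {
//     int n_idx = points_idx[batch][k][j][i];
//     if (n_idx < 0) continue;                        // sentinel: no point overlaps this pixel
//     acc += features[ch][n_idx] * alphas[batch][k][j][i];
//   }
//   atomicAdd(&result[batch][ch][j][i], acc);         // single atomic per output element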
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void weightedSumCudaBackwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> grad_features,
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_alphas,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_outputs,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Iterate over each pixel to compute the contribution to the
// gradient for the features and weights
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&grad_alphas[batch][k][j][i],
features[ch][n_idx] * grad_outputs[batch][ch][j][i]);
atomicAdd(
&grad_features[ch][n_idx], alpha * grad_outputs[batch][ch][j][i]);
}
}
}
at::Tensor weightedSumCudaForward(
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg features_t{features, "features", 1},
alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3};
at::CheckedFrom c = "weightedSumCudaForward";
at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(features.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
auto result = at::zeros({batch_size, C, H, W}, features.options());
if (result.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return result;
}
const dim3 threadsPerBlock(64);
const dim3 numBlocks(batch_size, 1024 / batch_size + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
hipLaunchKernelGGL(( weightedSumCudaForwardKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
// clang-format off
result.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(hipGetLastError());
return result;
}
std::tuple<at::Tensor, at::Tensor> weightedSumCudaBackward(
const at::Tensor& grad_outputs,
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1},
features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3},
points_idx_t{points_idx, "points_idx", 4};
at::CheckedFrom c = "weightedSumCudaBackward";
at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(features.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto grad_features = at::zeros_like(features);
auto grad_alphas = at::zeros_like(alphas);
if (grad_features.numel() == 0 || grad_alphas.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
const int64_t bs = points_idx.size(0);
const dim3 threadsPerBlock(64);
const dim3 numBlocks(bs, 1024 / bs + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
hipLaunchKernelGGL(( weightedSumCudaBackwardKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
// clang-format off
grad_features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
grad_alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
grad_outputs.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
| weighted_sum.cu | // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <vector>
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void weightedSumCudaForwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> result,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = result.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Parallelize over each feature in each pixel in images of size H * W,
// for each image in the batch of size batch_size
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
// Accumulate the values
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(&result[batch][ch][j][i], features[ch][n_idx] * alpha);
}
}
}
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void weightedSumCudaBackwardKernel(
// clang-format off
at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> grad_features,
at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_alphas,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_outputs,
const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
// clang-format on
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
// Get the batch and index
const int batch = blockIdx.x;
const int num_pixels = C * W * H;
const int num_threads = gridDim.y * blockDim.x;
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
// Iterate over each pixel to compute the contribution to the
// gradient for the features and weights
for (int pid = tid; pid < num_pixels; pid += num_threads) {
int ch = pid / (W * H);
int j = (pid % (W * H)) / H;
int i = (pid % (W * H)) % H;
// Iterate through the closest K points for this pixel
for (int k = 0; k < points_idx.size(1); ++k) {
int n_idx = points_idx[batch][k][j][i];
// Sentinel value is -1 indicating no point overlaps the pixel
if (n_idx < 0) {
continue;
}
float alpha = alphas[batch][k][j][i];
// TODO(gkioxari) It might be more efficient to have threads write in a
// local variable, and move atomicAdd outside of the loop such that
// atomicAdd is executed once per thread.
atomicAdd(
&grad_alphas[batch][k][j][i],
features[ch][n_idx] * grad_outputs[batch][ch][j][i]);
atomicAdd(
&grad_features[ch][n_idx], alpha * grad_outputs[batch][ch][j][i]);
}
}
}
at::Tensor weightedSumCudaForward(
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg features_t{features, "features", 1},
alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3};
at::CheckedFrom c = "weightedSumCudaForward";
at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(features.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t batch_size = points_idx.size(0);
const int64_t C = features.size(0);
const int64_t H = points_idx.size(2);
const int64_t W = points_idx.size(3);
auto result = at::zeros({batch_size, C, H, W}, features.options());
if (result.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
const dim3 threadsPerBlock(64);
const dim3 numBlocks(batch_size, 1024 / batch_size + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
weightedSumCudaForwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
// clang-format off
result.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
std::tuple<at::Tensor, at::Tensor> weightedSumCudaBackward(
const at::Tensor& grad_outputs,
const at::Tensor& features,
const at::Tensor& alphas,
const at::Tensor& points_idx) {
// Check inputs are on the same device
at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1},
features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3},
points_idx_t{points_idx, "points_idx", 4};
at::CheckedFrom c = "weightedSumCudaBackward";
at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t});
at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(features.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto grad_features = at::zeros_like(features);
auto grad_alphas = at::zeros_like(alphas);
if (grad_features.numel() == 0 || grad_alphas.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
const int64_t bs = points_idx.size(0);
const dim3 threadsPerBlock(64);
const dim3 numBlocks(bs, 1024 / bs + 1);
// TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
// doubles. Currently, support is for floats only.
weightedSumCudaBackwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
// clang-format off
grad_features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
grad_alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
grad_outputs.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
// clang-format on
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_features, grad_alphas);
}
|
d056b4f29c6ee9fb065dce1469946f038e52923e.hip | // !!! This is a file automatically generated by hipify!!!
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <petsc/private/cudavecimpl.h>
/*MC
VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise.
Options Database Keys:
. -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
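/*
   A minimal usage sketch for the option described above (illustrative only; the local size 100
   and PETSC_COMM_WORLD are assumptions). Running with -vec_type cuda makes VecSetFromOptions()
   pick VECSEQCUDA or VECMPICUDA depending on the communicator size:
     Vec x;
     PetscErrorCode ierr;
     ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
     ierr = VecSetSizes(x,100,PETSC_DECIDE);CHKERRQ(ierr);
     ierr = VecSetFromOptions(x);CHKERRQ(ierr);
     ierr = VecSet(x,1.0);CHKERRQ(ierr);
     ierr = VecDestroy(&x);CHKERRQ(ierr);
*/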
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
Vec_MPI *vecmpi = (Vec_MPI*)v->data;
Vec_CUDA *veccuda;
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (v->spptr) {
veccuda = (Vec_CUDA*)v->spptr;
if (veccuda->GPUarray_allocated) {
err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
veccuda->GPUarray_allocated = NULL;
}
if (veccuda->stream) {
err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
if (v->pinned_memory) {
ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr);
ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr);
ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr);
v->pinned_memory = PETSC_FALSE;
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRMPI(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRMPI(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
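/*
   A minimal usage sketch, assuming an existing device buffer d_array of local length n and
   global length N (all three names are placeholders, not from this file):
     Vec x;
     PetscErrorCode ierr;
     ierr = VecCreateMPICUDAWithArray(PETSC_COMM_WORLD,1,n,N,d_array,&x);CHKERRQ(ierr);
     ierr = VecScale(x,2.0);CHKERRQ(ierr);  // operates directly on the user-provided GPU array
     ierr = VecDestroy(&x);CHKERRQ(ierr);   // d_array is not freed; the caller still owns it
*/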
/*@C
VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- cpuarray - the user provided CPU array to store the vector values
- gpuarray - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
If both cpuarray and gpuarray are provided, the caller must ensure that
the provided arrays have identical values.
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
PETSc does NOT free the provided arrays when the vector is destroyed via
VecDestroy(). The user should not free the array until the vector is
destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(),
VecCUDAAllocateCheckHost()
@*/
PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr);
if (cpuarray && gpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_BOTH;
} else if (cpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_CPU;
} else if (gpuarray) {
(*vv)->offloadmask = PETSC_OFFLOAD_GPU;
} else {
(*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecMax_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMax_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MAXLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecMin_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMin_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MINLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->boundtocpu = pin;
if (pin) {
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
V->ops->max = VecMax_MPI;
V->ops->min = VecMin_MPI;
V->ops->reciprocal = VecReciprocal_Default;
V->ops->sum = NULL;
V->ops->shift = NULL;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCRANDER48,&V->defaultrandtype);CHKERRQ(ierr);
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
V->ops->getarray = VecGetArray_SeqCUDA;
V->ops->restorearray = VecRestoreArray_SeqCUDA;
V->ops->getarrayandmemtype = VecGetArrayAndMemType_SeqCUDA;
V->ops->restorearrayandmemtype = VecRestoreArrayAndMemType_SeqCUDA;
V->ops->max = VecMax_MPICUDA;
V->ops->min = VecMin_MPICUDA;
V->ops->reciprocal = VecReciprocal_SeqCUDA;
V->ops->sum = VecSum_SeqCUDA;
V->ops->shift = VecShift_SeqCUDA;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCCURAND,&V->defaultrandtype);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->bindtocpu = VecBindToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
}
if (array) {
if (!vv->spptr) {
PetscReal pinned_memory_min;
PetscBool flag;
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscCalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
vv->minimum_bytes_pinned_memory = 0;
/* Need to parse command line for minimum size to use for pinned memory allocations on host here.
Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? */
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr);
pinned_memory_min = vv->minimum_bytes_pinned_memory;
ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr);
if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min;
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
vv->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
| d056b4f29c6ee9fb065dce1469946f038e52923e.cu |
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <petsc/private/cudavecimpl.h>
/*MC
VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise.
Options Database Keys:
. -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
Vec_MPI *vecmpi = (Vec_MPI*)v->data;
Vec_CUDA *veccuda;
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (v->spptr) {
veccuda = (Vec_CUDA*)v->spptr;
if (veccuda->GPUarray_allocated) {
err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
veccuda->GPUarray_allocated = NULL;
}
if (veccuda->stream) {
err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
if (v->pinned_memory) {
ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr);
ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr);
ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr);
v->pinned_memory = PETSC_FALSE;
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRMPI(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRMPI(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- cpuarray - the user provided CPU array to store the vector values
- gpuarray - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
If both cpuarray and gpuarray are provided, the caller must ensure that
the provided arrays have identical values.
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
PETSc does NOT free the provided arrays when the vector is destroyed via
VecDestroy(). The user should not free the array until the vector is
destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(),
VecCUDAAllocateCheckHost()
@*/
PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr);
if (cpuarray && gpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_BOTH;
} else if (cpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_CPU;
} else if (gpuarray) {
(*vv)->offloadmask = PETSC_OFFLOAD_GPU;
} else {
(*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecMax_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMax_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MAXLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecMin_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMin_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MINLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->boundtocpu = pin;
if (pin) {
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
V->ops->max = VecMax_MPI;
V->ops->min = VecMin_MPI;
V->ops->reciprocal = VecReciprocal_Default;
V->ops->sum = NULL;
V->ops->shift = NULL;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCRANDER48,&V->defaultrandtype);CHKERRQ(ierr);
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
V->ops->getarray = VecGetArray_SeqCUDA;
V->ops->restorearray = VecRestoreArray_SeqCUDA;
V->ops->getarrayandmemtype = VecGetArrayAndMemType_SeqCUDA;
V->ops->restorearrayandmemtype = VecRestoreArrayAndMemType_SeqCUDA;
V->ops->max = VecMax_MPICUDA;
V->ops->min = VecMin_MPICUDA;
V->ops->reciprocal = VecReciprocal_SeqCUDA;
V->ops->sum = VecSum_SeqCUDA;
V->ops->shift = VecShift_SeqCUDA;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCCURAND,&V->defaultrandtype);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->bindtocpu = VecBindToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
}
if (array) {
if (!vv->spptr) {
PetscReal pinned_memory_min;
PetscBool flag;
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscCalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
vv->minimum_bytes_pinned_memory = 0;
/* Need to parse command line for minimum size to use for pinned memory allocations on host here.
Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? */
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr);
pinned_memory_min = vv->minimum_bytes_pinned_memory;
ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr);
if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min;
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
vv->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
|
2df52d3408b3227a0ac51a6fe4e7aaa6b65af750.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Number of elements in each vector
#define N 2048 * 2048
__global__ void my_kernel(float scalar, float * x, float * y)
{
    // Determine the unique global thread ID, so we know which element to process
int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Make sure this thread still has an element to process!
if ( tid < N )
y[tid] = scalar * x[tid] + y[tid];
}
int main()
{
float *x, *y;
    // The total number of bytes per vector
int size = N * sizeof (float);
hipError_t ierrAsync;
hipError_t ierrSync;
    // Allocate memory
hipMallocManaged(&x, size);
hipMallocManaged(&y, size);
    // Initialize the memory
for( int i = 0; i < N; ++i )
{
x[i] = 1.0f;
y[i] = 2.0f;
}
int threads_per_block = 256;
int number_of_blocks = (N / threads_per_block) + 1;
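    // Round up the block count so every element is covered; the bounds check in the
    // kernel handles the extra threads in the last block.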
hipLaunchKernelGGL(( my_kernel) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, 2.0f, x, y );
ierrSync = hipGetLastError();
    // Wait until the GPU finishes
ierrAsync = hipDeviceSynchronize();
    // Check execution status
if (ierrSync != hipSuccess) { printf("Sync error: %s\n", hipGetErrorString(ierrSync)); }
if (ierrAsync != hipSuccess) { printf("Async error: %s\n", hipGetErrorString(ierrAsync)); }
    // Print the maximum error
float maxError = 0;
for( int i = 0; i < N; ++i )
if (abs(4-y[i]) > maxError) { maxError = abs(4-y[i]); }
printf("Max Error: %.5f", maxError);
    // Free the allocated memory
hipFree( x ); hipFree( y );
} | 2df52d3408b3227a0ac51a6fe4e7aaa6b65af750.cu | #include <stdio.h>
// Number of elements in each vector
#define N 2048 * 2048
__global__ void my_kernel(float scalar, float * x, float * y)
{
    // Determine the unique global thread ID, so we know which element to process
int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Make sure this thread still has an element to process!
if ( tid < N )
y[tid] = scalar * x[tid] + y[tid];
}
int main()
{
float *x, *y;
    // The total number of bytes per vector
int size = N * sizeof (float);
cudaError_t ierrAsync;
cudaError_t ierrSync;
    // Allocate memory
cudaMallocManaged(&x, size);
cudaMallocManaged(&y, size);
    // Initialize the memory
for( int i = 0; i < N; ++i )
{
x[i] = 1.0f;
y[i] = 2.0f;
}
int threads_per_block = 256;
int number_of_blocks = (N / threads_per_block) + 1;
my_kernel <<< number_of_blocks, threads_per_block >>> ( 2.0f, x, y );
ierrSync = cudaGetLastError();
    // Wait until the GPU finishes
ierrAsync = cudaDeviceSynchronize();
    // Check execution status
if (ierrSync != cudaSuccess) { printf("Sync error: %s\n", cudaGetErrorString(ierrSync)); }
if (ierrAsync != cudaSuccess) { printf("Async error: %s\n", cudaGetErrorString(ierrAsync)); }
    // Print the maximum error
float maxError = 0;
for( int i = 0; i < N; ++i )
if (abs(4-y[i]) > maxError) { maxError = abs(4-y[i]); }
printf("Max Error: %.5f", maxError);
    // Free the allocated memory
cudaFree( x ); cudaFree( y );
} |
88983149b82b830a30ccb95e373092ec0f24cf3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include "timer.h"
#define SPATIAL_IMPACT 0.2
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
__global__ void waveKernel(unsigned int n, double* old, double* cur, double* next) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip the boundary points; the stencil reads cur[i-1] and cur[i+1], so stay strictly inside the array.
    if(i > 0 && i < n - 1){
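        // Leapfrog update for the 1-D wave equation:
        //   next[i] = 2*cur[i] - old[i] + C*(cur[i-1] - 2*cur[i] + cur[i+1]),
        // where C = SPATIAL_IMPACT plays the role of the squared Courant number (c*dt/dx)^2.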
next[i] = 2.0 * cur[i] - old[i] +
SPATIAL_IMPACT * ((cur[i - 1] - (2.0 * cur[i] - cur[i + 1])));
}
}
double *computeWaveCuda(int i_max, int t_max, int tpb, double *hOld, double *hCur, double *hNext){
double *dOld, *dCur, *dNext, *tmp;
// Alloc space on the device.
checkCudaCall(hipMalloc((void **) &dOld, i_max * sizeof(double)));
checkCudaCall(hipMalloc((void **) &dCur, i_max * sizeof(double)));
checkCudaCall(hipMalloc((void **) &dNext, i_max * sizeof(double)));
// Copy from main mem to device mem.
checkCudaCall(hipMemcpy(dOld, hOld, i_max*sizeof(double), hipMemcpyHostToDevice));
checkCudaCall(hipMemcpy(dCur, hCur, i_max*sizeof(double), hipMemcpyHostToDevice));
timer waveTimer("Wave timer");
waveTimer.start();
int t;
for(t = 0; t < t_max; t++){
// Start the computation for time = t.
hipLaunchKernelGGL(( waveKernel), dim3((int) ceil((double) i_max / (double) tpb)), dim3(tpb), 0, 0, i_max, dOld, dCur, dNext);
checkCudaCall(hipGetLastError());
// Rotate buffers.
tmp = dOld;
dOld = dCur;
dCur = dNext;
dNext = tmp;
tmp = hOld;
hOld = hCur;
hCur = hNext;
hNext = tmp;
}
waveTimer.stop();
cout << waveTimer;
// Copy back the result from device mem to main mem.
checkCudaCall(hipMemcpy(hCur, dCur, i_max * sizeof(double), hipMemcpyDeviceToHost));
// Free device mem.
checkCudaCall(hipFree(dOld));
checkCudaCall(hipFree(dCur));
checkCudaCall(hipFree(dNext));
return hCur;
}
| 88983149b82b830a30ccb95e373092ec0f24cf3c.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include "timer.h"
#define SPATIAL_IMPACT 0.2
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
__global__ void waveKernel(unsigned int n, double* old, double* cur, double* next) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip the boundary points; the stencil reads cur[i-1] and cur[i+1], so stay strictly inside the array.
    if(i > 0 && i < n - 1){
next[i] = 2.0 * cur[i] - old[i] +
SPATIAL_IMPACT * ((cur[i - 1] - (2.0 * cur[i] - cur[i + 1])));
}
}
double *computeWaveCuda(int i_max, int t_max, int tpb, double *hOld, double *hCur, double *hNext){
double *dOld, *dCur, *dNext, *tmp;
// Alloc space on the device.
checkCudaCall(cudaMalloc((void **) &dOld, i_max * sizeof(double)));
checkCudaCall(cudaMalloc((void **) &dCur, i_max * sizeof(double)));
checkCudaCall(cudaMalloc((void **) &dNext, i_max * sizeof(double)));
// Copy from main mem to device mem.
checkCudaCall(cudaMemcpy(dOld, hOld, i_max*sizeof(double), cudaMemcpyHostToDevice));
checkCudaCall(cudaMemcpy(dCur, hCur, i_max*sizeof(double), cudaMemcpyHostToDevice));
timer waveTimer("Wave timer");
waveTimer.start();
int t;
for(t = 0; t < t_max; t++){
// Start the computation for time = t.
waveKernel<<<(int) ceil((double) i_max / (double) tpb), tpb>>>(i_max, dOld, dCur, dNext);
checkCudaCall(cudaGetLastError());
// Rotate buffers.
tmp = dOld;
dOld = dCur;
dCur = dNext;
dNext = tmp;
tmp = hOld;
hOld = hCur;
hCur = hNext;
hNext = tmp;
}
waveTimer.stop();
cout << waveTimer;
// Copy back the result from device mem to main mem.
checkCudaCall(cudaMemcpy(hCur, dCur, i_max * sizeof(double), cudaMemcpyDeviceToHost));
// Free device mem.
checkCudaCall(cudaFree(dOld));
checkCudaCall(cudaFree(dCur));
checkCudaCall(cudaFree(dNext));
return hCur;
}
|
d591cb464803ca8a967a8255fea27e1c3dc2da7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* testShuffle.cu
*
* Microdemo to illustrate the workings of Kepler's new shuffle instruction.
*
* Build with: nvcc -I ..\chLib <options> testShuffle.cu
* Requires: SM 3.0 or higher.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <stdio.h>
#include <sm_30_intrinsics.h>
__global__ void
TestShuffle( int *out, const int *in, size_t N )
{
size_t i = blockIdx.x*blockDim.x+threadIdx.x;
int value = (int) i;//in[i];
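    // __shfl_up_sync shifts values up by one lane within each warp: lane k receives
    // the value held by lane k-1, and lane 0 of every warp keeps its own value.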
out[i] = __shfl_up_sync( 0xffffffff, value, 1 );
}
hipError_t
PrintShuffle( int offset, size_t cInts )
{
int *dptr = 0;
hipError_t status;
int h[64];
cuda(Malloc( &dptr, cInts*sizeof(int) ) );
hipLaunchKernelGGL(( TestShuffle), dim3(1),dim3(cInts), 0, 0, dptr, dptr, cInts );
cuda(Memcpy( h, dptr, cInts*sizeof(int), hipMemcpyDeviceToHost ) );
for ( size_t i = 0; i < cInts; i++ ) {
printf( "%3x", h[i] );
if (31==i%32) printf("\n");
}
printf( "\n" );
Error:
hipFree( dptr );
return status;
}
int
main( int argc, char *argv[] )
{
int ret = 1;
int cInts = 64;
hipError_t status;
CUDART_CHECK( PrintShuffle( 1, cInts ) );
return 0;
Error:
printf( "Error %d (%s)\n", status, hipGetErrorString( status ) );
return ret;
}
| d591cb464803ca8a967a8255fea27e1c3dc2da7b.cu | /*
*
* testShuffle.cu
*
* Microdemo to illustrate the workings of Kepler's new shuffle instruction.
*
* Build with: nvcc -I ..\chLib <options> testShuffle.cu
* Requires: SM 3.0 or higher.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <stdio.h>
#include <sm_30_intrinsics.h>
__global__ void
TestShuffle( int *out, const int *in, size_t N )
{
size_t i = blockIdx.x*blockDim.x+threadIdx.x;
int value = (int) i;//in[i];
out[i] = __shfl_up_sync( 0xffffffff, value, 1 );
}
cudaError_t
PrintShuffle( int offset, size_t cInts )
{
int *dptr = 0;
cudaError_t status;
int h[64];
cuda(Malloc( &dptr, cInts*sizeof(int) ) );
TestShuffle<<<1,cInts>>>( dptr, dptr, cInts );
cuda(Memcpy( h, dptr, cInts*sizeof(int), cudaMemcpyDeviceToHost ) );
for ( size_t i = 0; i < cInts; i++ ) {
printf( "%3x", h[i] );
if (31==i%32) printf("\n");
}
printf( "\n" );
Error:
cudaFree( dptr );
return status;
}
int
main( int argc, char *argv[] )
{
int ret = 1;
int cInts = 64;
cudaError_t status;
CUDART_CHECK( PrintShuffle( 1, cInts ) );
return 0;
Error:
printf( "Error %d (%s)\n", status, cudaGetErrorString( status ) );
return ret;
}
|
07d45eee41f4ea0a630f2f5baad1698642bcad15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void minimumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) < (*in2X)) ? (*in1X) : (*in2X);
}
} | 07d45eee41f4ea0a630f2f5baad1698642bcad15.cu | #include "includes.h"
__global__ void minimumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
const uint8_t * in1X = in1 + y * rowSizeIn1 + x;
const uint8_t * in2X = in2 + y * rowSizeIn2 + x;
uint8_t * outX = out + y * rowSizeOut + x;
(*outX) = ((*in1X) < (*in2X)) ? (*in1X) : (*in2X);
}
} |
9d3a424562c7dd7524ce7dcbd11d41bf271fed50.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/graphs/traversal.h>
#include <utils/stopwatch.h>
#include <moderngpu/context.hxx>
#include <moderngpu/kernel_scan.hxx>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <boost/format.hpp>
#include <utils/cuda_utils.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(threshold);
DECLARE_int32(grid_size);
DECLARE_int32(block_size);
namespace data_driven_unopt_pr {
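// Push-style PageRank: PageRankInit__Single__ seeds every node with rank (1 - ALPHA)
// and scatters the initial residual ALPHA*(1 - ALPHA)/out_degree to its neighbours;
// PageRankKernel__Single__ then repeatedly folds each node's residual into its rank
// and pushes ALPHA*residual/out_degree along the outgoing edges until the host loop
// sees the relative rank sum cross the threshold.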
template<typename WorkSource,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source,
TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernel__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource>
void Init__Single__(const WorkSource &workSource, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource>
bool RankCheck__Single__(WorkSource work_source, mgpu::context_t &context) {
rank_t *tmp = m_current_ranks.data_ptr;
auto check_segment_sizes = [=]__device__(int idx) {
return tmp[idx];
};
mgpu::mem_t<double> checkSum(1, context);
mgpu::mem_t<int> deviceOffsets = mgpu::mem_t<int>(work_source.get_size(), context);
int *scanned_offsets = deviceOffsets.data();
mgpu::transform_scan<double>(check_segment_sizes, work_source.get_size(),
scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), context);
double pr_sum = mgpu::from_mem(checkSum)[0];
VLOG(1) << "Checking... SUM: " << pr_sum << " Relative SUM: " << pr_sum / work_source.get_size();
return pr_sum / work_source.get_size() < FLAGS_threshold;
}
template<typename WorkSource>
void
Relax__Single__(const WorkSource &work_source, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
PageRankKernel__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool TopologyDrivenUnoptPR() {
VLOG(0) << "TopologyDrivenUnoptPR";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<data_driven_unopt_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
groute::Stream stream = context.CreateStream(0);
mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
data_driven_unopt_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
Stopwatch sw(true);
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
stream);
int iteration = 0;
bool running = true;
while (running) {
solver.Relax__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), stream);
stream.Sync();
running = solver.RankCheck__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), mgpu_context);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
VLOG(1) << "Iteration: " << iteration;
}
sw.stop();
VLOG(1)
<< boost::format("%s terminated after %d iterations (max: %d)") % data_driven_unopt_pr::Algo::Name() % iteration %
FLAGS_max_pr_iterations;
VLOG(0) << data_driven_unopt_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = data_driven_unopt_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
data_driven_unopt_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = data_driven_unopt_pr::Algo::Host(context.host_graph, residual, current_ranks);
return data_driven_unopt_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
} | 9d3a424562c7dd7524ce7dcbd11d41bf271fed50.cu | //
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/graphs/traversal.h>
#include <utils/stopwatch.h>
#include <moderngpu/context.hxx>
#include <moderngpu/kernel_scan.hxx>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <boost/format.hpp>
#include <utils/cuda_utils.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(threshold);
DECLARE_int32(grid_size);
DECLARE_int32(block_size);
namespace data_driven_unopt_pr {
template<typename WorkSource,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source,
TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankKernel__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res == 0) continue; // might happen if work_source has duplicates
current_ranks[node] += res;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = res * ALPHA / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
/*
* The per-device Page Rank problem
*/
template<typename TGraph,
template<typename> class ResidualDatum,
template<typename> class RankDatum>
struct Problem {
TGraph m_graph;
ResidualDatum<rank_t> m_residual;
RankDatum<rank_t> m_current_ranks;
Problem(const TGraph &graph, RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) :
m_graph(graph), m_residual(residual), m_current_ranks(current_ranks) {
}
template<typename WorkSource>
void Init__Single__(const WorkSource &workSource, groute::Stream &stream) const {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_graph.owned_nnodes());
Marker::MarkWorkitems(m_graph.owned_nnodes(), "PageRankInit__Single__");
PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(workSource, m_graph, m_current_ranks, m_residual);
}
template<typename WorkSource>
bool RankCheck__Single__(WorkSource work_source, mgpu::context_t &context) {
rank_t *tmp = m_current_ranks.data_ptr;
auto check_segment_sizes = [=]__device__(int idx) {
return tmp[idx];
};
mgpu::mem_t<double> checkSum(1, context);
mgpu::mem_t<int> deviceOffsets = mgpu::mem_t<int>(work_source.get_size(), context);
int *scanned_offsets = deviceOffsets.data();
mgpu::transform_scan<double>(check_segment_sizes, work_source.get_size(),
scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), context);
double pr_sum = mgpu::from_mem(checkSum)[0];
VLOG(1) << "Checking... SUM: " << pr_sum << " Relative SUM: " << pr_sum / work_source.get_size();
return pr_sum / work_source.get_size() < FLAGS_threshold;
}
template<typename WorkSource>
void
Relax__Single__(const WorkSource &work_source, groute::Stream &stream) {
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, work_source.get_size());
Marker::MarkWorkitems(work_source.get_size(), "PageRankKernel__Single__");
PageRankKernel__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(work_source, m_graph, m_current_ranks, m_residual);
}
};
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool TopologyDrivenUnoptPR() {
VLOG(0) << "TopologyDrivenUnoptPR";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
utils::traversal::Context<data_driven_unopt_pr::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(residual, current_ranks);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
groute::Stream stream = context.CreateStream(0);
mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
data_driven_unopt_pr::Problem<
groute::graphs::dev::CSRGraph,
groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum>
solver(
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject());
Stopwatch sw(true);
solver.Init__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
stream);
int iteration = 0;
bool running = true;
while (running) {
solver.Relax__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), stream);
stream.Sync();
running = solver.RankCheck__Single__(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()), mgpu_context);
if (++iteration > FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
VLOG(1) << "Iteration: " << iteration;
}
sw.stop();
VLOG(1)
<< boost::format("%s terminated after %d iterations (max: %d)") % data_driven_unopt_pr::Algo::Name() % iteration %
FLAGS_max_pr_iterations;
VLOG(0) << data_driven_unopt_pr::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// Gather
auto gathered_output = data_driven_unopt_pr::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
data_driven_unopt_pr::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = data_driven_unopt_pr::Algo::Host(context.host_graph, residual, current_ranks);
return data_driven_unopt_pr::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
} |
086b30fd2d801be930c9b5edd854967d21253b1c.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <ctime>
#include <ratio>
#include <hip/hip_runtime_api.h>
#include "catch.hpp"
#include "../split_step.h"
#include "../twoParticlesInHO_Ham.h"
#include "../wavefunction.h"
#include "../easyloggingcpp/easylogging++.h"
#include "../utils.h"
using namespace std::chrono;
__global__ void g_calcExpVPhi(hamFunc * df_V, hamFunc * df_U, hamFunc * df_Nonl, void * d_par, double T, double half_i_dt, hipfftDoubleComplex * d_expV_phi, double * d_x, size_t vSz);
TEST_CASE("Split step Constructor works","[SPLIT]"){
resetCudaError();
size_t Nx = 64;
// dx = 2.0 * xmax / (double)Nx
C2ParticlesHO ham(Nx, 0.0);
CSplitStep splitStepOperator(&ham);
REQUIRE(splitStepOperator.ham != NULL);
CSplitStep badOper(NULL);
REQUIRE(badOper.ham == NULL);
}
TEST_CASE("ExpV.*phi works","[SPLIT]"){
resetCudaError();
CWaveFunction wf("src/unittest/psiRe.csv", "src/unittest/psiIm.csv");
REQUIRE(wf.getColsSize() > 0);
REQUIRE(wf.getRowSize() > 0);
REQUIRE(wf.getHostWF() != NULL);
REQUIRE(wf.copyToGPU() == 0);
size_t vSize = wf.getColsSize();
double x0 = 0.0;
double omega = 0.5;
double xmax = 5.0;
double g = 4.4467;
double dt = 0.001;
double T = 0;
double eps = pow(10, -3);
double half_i_dt = -0.5 * dt;
// Harmonic pot. centered on x0, x in [-xmax, xmax]
C2ParticlesHO ham(vSize, g, xmax, x0, omega);
CSplitStep splitStepOperator(&ham);
REQUIRE(splitStepOperator.ham != NULL);
// Get cuda properties
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
// Maximum threads per block on this device
int maxThreads = prop.maxThreadsPerBlock;
    // Call the kernel to compute expV .* phi
int blockSzX = min((size_t)maxThreads, vSize);
dim3 threadsPerBlock(blockSzX, 1);
dim3 grid(ceil(vSize / blockSzX), vSize);
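    // One thread per grid point of the vSize x vSize two-particle wavefunction:
    // blockDim.x * gridDim.x covers one coordinate, gridDim.y the other.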
hipLaunchKernelGGL(( g_calcExpVPhi), dim3(grid), dim3(threadsPerBlock), 0, 0, ham.timeDepPotential(), ham.timeDepInteraction(), ham.timeDepNonLin(), ham.getParams(), T, half_i_dt, wf.getDeviceWF(), ham.getDeviceX(), vSize);
REQUIRE(hipGetLastError() == hipSuccess);
// normalize
REQUIRE(normalize(wf.getDeviceWF(), vSize, ham.getCoordStep()) == 0);
// Compare with the reference
REQUIRE(wf.copyFromGPU() == 0);
CWaveFunction refWF("src/unittest/expVPsiReal.csv","src/unittest/expVPsiImag.csv");
REQUIRE(refWF.copyToGPU() == 0);
REQUIRE(normalize(refWF.getDeviceWF(), vSize, ham.getCoordStep()) == 0);
REQUIRE(refWF.copyFromGPU() == 0);
for (int i = 0; i < vSize * vSize; i++)
{
REQUIRE(wf.getHostWF()[i][0] == Approx(refWF.getHostWF()[i][0]).margin(eps));
REQUIRE(wf.getHostWF()[i][1] == Approx(wf.getHostWF()[i][1]).margin(eps));
}
}
TEST_CASE("Time one split step", "[SPLIT]"){
hipProfilerStart();
size_t N = 512;
double xmax = 5.0;
double x0 = 0.0;
double omega = 0.5;
double dt = 0.001;
hipfftHandle * plan = NULL;
CWaveFunction wf(N, xmax, 1.7);
REQUIRE(wf.getColsSize() > 0);
REQUIRE(wf.getRowSize() > 0);
REQUIRE(wf.getRowSize() > 0);
REQUIRE(wf.getHostWF() != NULL);
REQUIRE(wf.copyToGPU() == 0);
C2ParticlesHO ham(N, 0.0, xmax, x0, omega);
CSplitStep splitStepOperator(&ham);
int avNum = 10000;
double sec = 0.0;
double av = 0.0;
for (int i = 0; i < avNum; i++)
{
high_resolution_clock::time_point t1 = high_resolution_clock::now();
REQUIRE(splitStepOperator.advanceOneStep(dt, 0, wf, plan) == 0);
high_resolution_clock::time_point t2 = high_resolution_clock::now();
duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
sec += time_span.count();
//std::cout <<"Time step " << i << "takes " << time_span.count() << "sec" << std::endl;
}
av = sec / (double)avNum;
LOG(INFO) << "Total time for "<< avNum << " split steps : " << sec << " , average time per step: " << av << std::endl;
hipfftDestroy(*plan);
hipProfilerStop();
}
TEST_CASE("One time step advancement works", "[SPLIT]"){
resetCudaError();
std::string realFile("src/unittest/WF1024dt0_001Re.csv");
std::string imagFile("src/unittest/WF1024dt0_001Im.csv");
CWaveFunction wf(realFile, imagFile);
REQUIRE(wf.getColsSize() > 0);
REQUIRE(wf.getHostWF() != NULL);
REQUIRE(wf.copyToGPU() == 0);
// Split step operator
double xmax = 5.0;
double x0 = 0.0;
double omega = 0.5;
C2ParticlesHO ham(wf.getColsSize(), 0.0, xmax, x0, omega);
CSplitStep splitStepOperator(&ham);
REQUIRE(splitStepOperator.ham != NULL);
// Advance one step
double curT = 1.1;
double dt = 0.001;
hipfftHandle * plan = NULL;
REQUIRE(splitStepOperator.advanceOneStep(dt, curT, wf, plan) == 0);
REQUIRE(normalize(wf.getDeviceWF(), wf.getColsSize(), ham.getCoordStep()) == 0);
REQUIRE(wf.copyFromGPU() == 0);
// Load reference
CWaveFunction refWF("src/unittest/WF1024dt0_001StepRe.csv","src/unittest/WF1024dt0_001StepIm.csv");
REQUIRE(refWF.copyToGPU() == 0);
REQUIRE(normalize(refWF.getDeviceWF(), refWF.getColsSize(), ham.getCoordStep()) == 0);
REQUIRE(refWF.copyFromGPU() == 0);
// Compare with the reference
double eps = pow(10, -3);
REQUIRE(wf.getColsSize() == refWF.getColsSize());
REQUIRE(wf.getRowSize() == refWF.getRowSize());
for (int i = 0; i < wf.getColsSize() * wf.getRowSize(); i++)
{
REQUIRE(wf.getHostWF()[i][0] == Approx(refWF.getHostWF()[i][0]).margin(eps));
REQUIRE(wf.getHostWF()[i][1] == Approx(refWF.getHostWF()[i][1]).margin(eps));
}
hipfftDestroy(*plan);
}
| 086b30fd2d801be930c9b5edd854967d21253b1c.cu | #include <chrono>
#include <ctime>
#include <ratio>
#include <cuda_profiler_api.h>
#include "catch.hpp"
#include "../split_step.h"
#include "../twoParticlesInHO_Ham.h"
#include "../wavefunction.h"
#include "../easyloggingcpp/easylogging++.h"
#include "../utils.h"
using namespace std::chrono;
__global__ void g_calcExpVPhi(hamFunc * df_V, hamFunc * df_U, hamFunc * df_Nonl, void * d_par, double T, double half_i_dt, cufftDoubleComplex * d_expV_phi, double * d_x, size_t vSz);
TEST_CASE("Split step Constructor works","[SPLIT]"){
resetCudaError();
size_t Nx = 64;
// dx = 2.0 * xmax / (double)Nx
C2ParticlesHO ham(Nx, 0.0);
CSplitStep splitStepOperator(&ham);
REQUIRE(splitStepOperator.ham != NULL);
CSplitStep badOper(NULL);
REQUIRE(badOper.ham == NULL);
}
TEST_CASE("ExpV.*phi works","[SPLIT]"){
resetCudaError();
CWaveFunction wf("src/unittest/psiRe.csv", "src/unittest/psiIm.csv");
REQUIRE(wf.getColsSize() > 0);
REQUIRE(wf.getRowSize() > 0);
REQUIRE(wf.getHostWF() != NULL);
REQUIRE(wf.copyToGPU() == 0);
size_t vSize = wf.getColsSize();
double x0 = 0.0;
double omega = 0.5;
double xmax = 5.0;
double g = 4.4467;
double dt = 0.001;
double T = 0;
double eps = pow(10, -3);
double half_i_dt = -0.5 * dt;
// Harmonic pot. centered on x0, x in [-xmax, xmax]
C2ParticlesHO ham(vSize, g, xmax, x0, omega);
CSplitStep splitStepOperator(&ham);
REQUIRE(splitStepOperator.ham != NULL);
// Get cuda properties
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
// Maximum threads per block on this device
int maxThreads = prop.maxThreadsPerBlock;
    // Call the kernel to compute expV .* phi
int blockSzX = min((size_t)maxThreads, vSize);
dim3 threadsPerBlock(blockSzX, 1);
dim3 grid(ceil(vSize / blockSzX), vSize);
g_calcExpVPhi<<<grid, threadsPerBlock>>>(ham.timeDepPotential(), ham.timeDepInteraction(), ham.timeDepNonLin(), ham.getParams(), T, half_i_dt, wf.getDeviceWF(), ham.getDeviceX(), vSize);
REQUIRE(cudaGetLastError() == cudaSuccess);
// normalize
REQUIRE(normalize(wf.getDeviceWF(), vSize, ham.getCoordStep()) == 0);
// Compare with the reference
REQUIRE(wf.copyFromGPU() == 0);
CWaveFunction refWF("src/unittest/expVPsiReal.csv","src/unittest/expVPsiImag.csv");
REQUIRE(refWF.copyToGPU() == 0);
REQUIRE(normalize(refWF.getDeviceWF(), vSize, ham.getCoordStep()) == 0);
REQUIRE(refWF.copyFromGPU() == 0);
for (int i = 0; i < vSize * vSize; i++)
{
REQUIRE(wf.getHostWF()[i][0] == Approx(refWF.getHostWF()[i][0]).margin(eps));
REQUIRE(wf.getHostWF()[i][1] == Approx(wf.getHostWF()[i][1]).margin(eps));
}
}
TEST_CASE("Time one split step", "[SPLIT]"){
cudaProfilerStart();
size_t N = 512;
double xmax = 5.0;
double x0 = 0.0;
double omega = 0.5;
double dt = 0.001;
cufftHandle * plan = NULL;
CWaveFunction wf(N, xmax, 1.7);
REQUIRE(wf.getColsSize() > 0);
REQUIRE(wf.getRowSize() > 0);
REQUIRE(wf.getRowSize() > 0);
REQUIRE(wf.getHostWF() != NULL);
REQUIRE(wf.copyToGPU() == 0);
C2ParticlesHO ham(N, 0.0, xmax, x0, omega);
CSplitStep splitStepOperator(&ham);
int avNum = 10000;
double sec = 0.0;
double av = 0.0;
for (int i = 0; i < avNum; i++)
{
high_resolution_clock::time_point t1 = high_resolution_clock::now();
REQUIRE(splitStepOperator.advanceOneStep(dt, 0, wf, plan) == 0);
high_resolution_clock::time_point t2 = high_resolution_clock::now();
duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
sec += time_span.count();
//std::cout <<"Time step " << i << "takes " << time_span.count() << "sec" << std::endl;
}
av = sec / (double)avNum;
LOG(INFO) << "Total time for "<< avNum << " split steps : " << sec << " , average time per step: " << av << std::endl;
cufftDestroy(*plan);
cudaProfilerStop();
}
TEST_CASE("One time step advancement works", "[SPLIT]"){
resetCudaError();
std::string realFile("src/unittest/WF1024dt0_001Re.csv");
std::string imagFile("src/unittest/WF1024dt0_001Im.csv");
CWaveFunction wf(realFile, imagFile);
REQUIRE(wf.getColsSize() > 0);
REQUIRE(wf.getHostWF() != NULL);
REQUIRE(wf.copyToGPU() == 0);
// Split step operator
double xmax = 5.0;
double x0 = 0.0;
double omega = 0.5;
C2ParticlesHO ham(wf.getColsSize(), 0.0, xmax, x0, omega);
CSplitStep splitStepOperator(&ham);
REQUIRE(splitStepOperator.ham != NULL);
// Advance one step
double curT = 1.1;
double dt = 0.001;
cufftHandle * plan = NULL;
REQUIRE(splitStepOperator.advanceOneStep(dt, curT, wf, plan) == 0);
REQUIRE(normalize(wf.getDeviceWF(), wf.getColsSize(), ham.getCoordStep()) == 0);
REQUIRE(wf.copyFromGPU() == 0);
// Load reference
CWaveFunction refWF("src/unittest/WF1024dt0_001StepRe.csv","src/unittest/WF1024dt0_001StepIm.csv");
REQUIRE(refWF.copyToGPU() == 0);
REQUIRE(normalize(refWF.getDeviceWF(), refWF.getColsSize(), ham.getCoordStep()) == 0);
REQUIRE(refWF.copyFromGPU() == 0);
// Compare with the reference
double eps = pow(10, -3);
REQUIRE(wf.getColsSize() == refWF.getColsSize());
REQUIRE(wf.getRowSize() == refWF.getRowSize());
for (int i = 0; i < wf.getColsSize() * wf.getRowSize(); i++)
{
REQUIRE(wf.getHostWF()[i][0] == Approx(refWF.getHostWF()[i][0]).margin(eps));
REQUIRE(wf.getHostWF()[i][1] == Approx(refWF.getHostWF()[i][1]).margin(eps));
}
cufftDestroy(*plan);
}
|
72e474e86dc427f2a70649aff404eaaf29c84591.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <MatKernel.hpp>
__device__ double atomicAdd(double* address, double val)
{
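    // Software emulation of double-precision atomicAdd for devices without native
    // support: reinterpret the double as a 64-bit integer and retry with atomicCAS
    // until no other thread has modified the value between our read and our swap.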
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
__device__ double op_add(double a, double b) {return a+b;}
__device__ double op_sub(double a, double b) {return a-b;}
__device__ double op_mul(double a, double b) {return a*b;}
__device__ double op_div(double a, double b) {return a/b;}
__device__ double op_gt(double a, double b) {return (a > b) ? 1.0f : 0;}
__device__ double op_lt(double a, double b) {return (a < b) ? 1.0f : 0;}
__device__ double op_eq(double a, double b) {return (a == b) ? 1.0f : 0;}
__device__ double op_ge(double a, double b) {return (a >= b) ? 1.0f : 0;}
__device__ double op_le(double a, double b) {return (a <= b) ? 1.0f : 0;}
__device__ double op_ne(double a, double b) {return (a != b) ? 1.0f : 0;}
__device__ double op_max(double a, double b) {return max(a,b);}
__device__ double op_min(double a, double b) {return min(a,b);}
__device__ double op_atan2(double a, double b) {return atan2f(a, b);}
__device__ double op_pow(double a, double b) {return powf(a, b);}
typedef double (*optype)(double,double);
__device__ const optype operators[] = {
op_add,
op_sub,
op_mul,
op_div,
op_gt,
op_lt,
op_eq,
op_ge,
op_le,
op_ne,
op_max,
op_min,
op_atan2,
op_pow};
__device__ double fn_abs(double a) {return abs(a);}
__device__ double fn_exp(double a) {return expf(a);}
__device__ double fn_log(double a) {return logf(a);}
__device__ double fn_expm1(double a) {return expm1f(a);}
__device__ double fn_sqrt(double a) {return sqrtf(a);}
__device__ double fn_ln(double a) {return logf(a);}
__device__ double fn_log10(double a) {return log10f(a);}
__device__ double fn_log1p(double a) {return log1pf(a);}
__device__ double fn_cos(double a) {return cosf(a);}
__device__ double fn_sin(double a) {return sinf(a);}
__device__ double fn_tan(double a) {return tanf(a);}
__device__ double fn_cosh(double a) {return coshf(a);}
__device__ double fn_sinh(double a) {return sinhf(a);}
__device__ double fn_tanh(double a) {return tanhf(a);}
__device__ double fn_acos(double a) {return acosf(a);}
__device__ double fn_asin(double a) {return asinf(a);}
__device__ double fn_atan(double a) {return atanf(a);}
__device__ double fn_acosh(double a) {return acoshf(a);}
__device__ double fn_asinh(double a) {return asinhf(a);}
__device__ double fn_atanh(double a) {return atanhf(a);}
__device__ double fn_erf(double a) {return erff(a);}
__device__ double fn_erfinv(double a) {return erfinvf(a);}
__device__ double fn_erfc(double a) {return erfcf(a);}
__device__ double fn_erfcinv(double a) {return erfcinvf(a);}
__device__ double fn_gammaln(double a) {return lgammaf(a);}
__device__ double fn_gamma(double a) {return tgammaf(a);}
__device__ double fn_ceil(double a) {return ceilf(a);}
__device__ double fn_floor(double a) {return floorf(a);}
__device__ double fn_round(double a) {return roundf(a);}
__device__ double fn_trunc(double a) {return truncf(a);}
__device__ double fn_sign(double a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ double fn_j0(double a) {return j0f(a);}
__device__ double fn_j1(double a) {return j1f(a);}
//__device__ double fn_jn(double a) {return jnf(a);}
__device__ double fn_y0(double a) {return y0f(a);}
__device__ double fn_y1(double a) {return y1f(a);}
//__device__ double fn_yn(double a) {return ynf(a);}
__device__ double fn_exppsi(double a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ double fn_atan2(double a, double b) {return atan2f(a, b);}
__device__ double fn_pow(double a, double b) {return powf(a, b);}
typedef double (*fntype)(double);
__device__ const fntype fctns[35] = {
fn_abs,
fn_exp,
fn_expm1,
fn_sqrt,
fn_ln,
fn_log10,
fn_log1p,
fn_cos,
fn_sin,
fn_tan,
fn_cosh,
fn_sinh,
fn_tanh,
fn_acos,
fn_asin,
fn_atan,
fn_acosh,
fn_asinh,
fn_atanh,
fn_erf,
fn_erfinv,
fn_erfc,
fn_erfcinv,
fn_gammaln,
fn_gamma,
fn_ceil,
fn_floor,
fn_round,
fn_trunc,
fn_sign,
fn_j0,
fn_j1,
fn_y0,
fn_y1,
fn_exppsi};
__device__ const optype fctns2[2] = {
fn_atan2,
fn_pow};
__global__ void __apply_gfun(double *A, double *B, int N, int opn) {
fntype fn = fctns[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = fn(A[i]);
}
}
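// Choose a 1-D launch configuration covering N elements: grow the block count to 16,
// then the thread count up to 1024, then the block count again, and finally fold the
// blocks into a 2-D grid so neither dimension exceeds the limits of older devices.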
void setsizesD(int N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
int apply_gfun(double *A, double *B, int N, int opn) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_gfun), dim3(griddims),dim3(nthreads), 0, 0, A, B, N, opn);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
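// Example (hypothetical device pointers dA/dB of length N; opn indexes the fctns[]
// table above, e.g. 1 selects fn_exp):
//   apply_gfun(dA, dB, N, 1);   // B = elementwise exponential of A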
template <class T>
__global__ void __toDouble(T *A, double *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (double)(A[i]);
}
}
__global__ void __toInt(double *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
int IntToDouble(int *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toDouble<int>), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int FloatToDouble(float *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toDouble<float>), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int toInt(double *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __toInt), dim3(griddims),dim3(nthreads), 0, 0, A, B, N);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int i, row, col;
double v;
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (i = id; i < nnz; i += blockDim.x * gridDim.x) {
v = data[i];
row = ir[i];
col = ic[i];
od[row + col * nrows] = v;
}
}
int full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int nblocks = min(32, 1+(nnz-1)/32);
int nthreads = max(32, min(1+(nnz-1)/nblocks, 1024));
hipLaunchKernelGGL(( __full), dim3(nblocks),dim3(nthreads), 0, 0, ir, ic, data, od, nrows, ncols, nnz);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
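// full() scatters the COO triplets (ir[i], ic[i], data[i]) into the dense column-major
// array od, i.e. od[ir[i] + ic[i]*nrows] = data[i]; indices are assumed to be 0-based.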
__global__ void __apply_gfun2(double *A, double *B, double *C, int N, int opn) {
optype fn = fctns2[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = fn(A[i], B[i]);
}
}
int apply_gfun2(double *A, double *B, double *C, int N, int opn) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
hipLaunchKernelGGL(( __apply_gfun2), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __apply_full(double *A, double *B, double *C, int N, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
double val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
double val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
__global__ void __set_val(double *A, double val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
int set_val(double *A, double val, int length) {
int nthreads;
dim3 griddims;
setsizesD(length, &griddims, &nthreads);
hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, val, length);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int set_ival(double *A, int val, int length) {
int nthreads;
dim3 griddims;
setsizesD(length, &griddims, &nthreads);
hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, *((double *)&val), length);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
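// Elementwise binary op with broadcasting: full matrix against full matrix, a row or
// column vector on either side, or a scalar on either side are each dispatched to a
// specialized kernel; opn indexes the operators[] table above.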
int apply_binop(double *A, int Anrows, int Ancols,
double *B, int Bnrows, int Bncols, double *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
hipLaunchKernelGGL(( __apply_full), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
hipLaunchKernelGGL(( __apply_right_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
hipLaunchKernelGGL(( __apply_left_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
hipLaunchKernelGGL(( __apply_right_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
hipLaunchKernelGGL(( __apply_left_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
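// In-place op between the nonzeros of a sparse matrix and a dense vector: __sdoprow
// indexes B by column (Aic), __sdopcol indexes B by row (Air), and __sdopval applies
// a single scalar value to every nonzero.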
__global__ void __sdoprow(int nrows, int ncols, int nnz, double *A, int *Aic, double *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int col = Aic[i];
double oldA = A[i];
A[i] = op(oldA,B[col]);
}
}
__global__ void __sdopcol(int nrows, int ncols, int nnz, double *A, int *Air, double *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int row = Air[i];
double oldA = A[i];
A[i] = op(oldA,B[row]);
}
}
__global__ void __sdopval(int nnz, double *A, double *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
double bval = B[0];
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
double oldA = A[i];
A[i] = op(oldA,bval);
}
}
int sdoprow(int nrows, int ncols, int nnz, double *A, int *Aic,
double *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
hipLaunchKernelGGL(( __sdoprow), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, nnz, A, Aic, B, opn);
} else {
hipLaunchKernelGGL(( __sdopval), dim3(griddims),dim3(nthreads), 0, 0, nnz, A, B, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int sdopcol(int nrows, int ncols, int nnz, double *A, int *Air,
double *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
hipLaunchKernelGGL(( __sdopcol), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, nnz, A, Air, B, opn);
} else {
hipLaunchKernelGGL(( __sdopval), dim3(griddims),dim3(nthreads), 0, 0, nnz, A, B, opn);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
// Implement B[I,J] = A
// indexed copy: version with one block per column
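// The two-letter suffix records which index arrays are used: 'n' means go through the
// index array (I for rows, J for columns) and 'x' means use the identity (that pointer
// is NULL), so e.g. 'xn' copies rows directly and maps columns through J.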
#define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2D##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A[i + iblock * lda]; \
} \
} \
}
COPYTOINDS2DA(nn,I[i],J[iblock])
COPYTOINDS2DA(xn,i,J[iblock])
COPYTOINDS2DA(nx,I[i],iblock)
COPYTOINDS2DA(xx,i,iblock)
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2DB##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \
} \
}
COPYTOINDS2DB(nn,I[irow],J[icol])
COPYTOINDS2DB(xn,irow,J[icol])
COPYTOINDS2DB(nx,I[irow],icol)
COPYTOINDS2DB(xx,irow,icol)
// Implement B[I,J] = A
int copyToInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((double)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyToInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyToInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
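// Usage sketch (hypothetical guard and pointer names): copyToInds2D implements B[I,J] = A for
// column-major matrices. Passing I == NULL (or J == NULL) means "use 0..nrows-1"
// (resp. "0..ncols-1") directly, which is what the x-suffixed kernel variants above encode.
#ifdef MATKERNEL_EXAMPLES
static int example_copyToInds2D(double *dA, double *dB, int ldb,
                                int *dI, int *dJ, int nrows, int ncols) {
  // Scatter the nrows x ncols block dA into dB at rows dI and columns dJ.
  return copyToInds2D(dA, nrows, dB, ldb, dI, nrows, dJ, ncols);
}
#endif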
// Implement B = A[I,J]
// indexed copy: version with one block per column
#define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2D##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[i + iblock * ldb] = A[IEXPR + icol * lda]; \
} \
} \
}
COPYFROMINDS2DA(nn,I[i],J[iblock])
COPYFROMINDS2DA(xn,i,J[iblock])
COPYFROMINDS2DA(nx,I[i],iblock)
COPYFROMINDS2DA(xx,i,iblock)
// Implement B = A[I,J]
// indexed copy: version with one thread per element
#define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2DB##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \
} \
}
COPYFROMINDS2DB(nn,I[irow],J[icol])
COPYFROMINDS2DB(xn,irow,J[icol])
COPYFROMINDS2DB(nx,I[irow],icol)
COPYFROMINDS2DB(xx,irow,icol)
// Implement B = A[I,J]
int copyFromInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
    int vs = (int)sqrt((double)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
hipLaunchKernelGGL(( __copyFromInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
} else {
hipLaunchKernelGGL(( __copyFromInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dsmult(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
int dsmult(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
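// Usage sketch (hypothetical guard/pointers): as written, dsmult computes C += A * B where A is a
// dense nrows x k matrix (column-major, k = number of rows of B), B is a k x ncols sparse matrix
// in coordinate form (Bdata, row indices Bir, column indices Bic) sorted by column so the running
// sum in __dsmult is flushed on each column change, and C is dense nrows x ncols. C is accumulated
// into with atomicAdd, so it should be zeroed by the caller.
#ifdef MATKERNEL_EXAMPLES
static int example_dsmult(int nrows, int ncols, int nnz,
                          double *dA, double *dBdata, int *dBir, int *dBic, double *dC) {
  hipMemset(dC, 0, (size_t)nrows * ncols * sizeof(double));
  return dsmult(nrows, ncols, nnz, dA, dBdata, dBir, dBic, dC);
}
#endif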
int dsmult_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreads) {
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dsmultT(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultTx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmultT), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B, int n) {
int nthreads = max(32, min(128, nnz));
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
hipLaunchKernelGGL(( __spsum1), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
} else {
hipLaunchKernelGGL(( __spsum2), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
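// Usage sketch (hypothetical guard/pointers): spsum accumulates the nonzero values P of a sparse
// matrix into a dense vector B, keyed by column (n == 1, uses Aic, B has length ncols) or by row
// (otherwise, uses Air, B has length nrows). B is accumulated into, so zero it first.
#ifdef MATKERNEL_EXAMPLES
static int example_column_sums(int nrows, int ncols, int nnz,
                               int *dAir, int *dAic, double *dP, double *dB) {
  hipMemset(dB, 0, (size_t)ncols * sizeof(double));
  return spsum(nrows, ncols, nnz, dAir, dAic, dP, dB, 1);
}
#endif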
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P);
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P);
__global__ void __reduce1op(int nrows, int ncols, double *A, double *B, int opn);
__global__ void __reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr);
#define DDS_BLKY 32
#if __CUDA_ARCH__ > 200
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {
__shared__ double merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
double user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
__shared__ double parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {}
#endif
int dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
hipLaunchKernelGGL(( __dds), dim3(nblocks),dim3(blockDims), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
hipLaunchKernelGGL(( __dds0), dim3(nblocks),dim3(blockDims), 0, 0, nrows, ncols, A, B, Cir, Cic, P);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
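// Reading of the kernels above (sketch; guard and pointer names hypothetical): dds evaluates the
// dense-dense product only at sampled positions, P[j] += dot(A[:,Cir[j]], B[:,Cic[j]]) over the
// nnz (row, column) pairs, as used for losses evaluated on the support of a sparse matrix. dds0
// is the variant driven by CSC column pointers Cjc. The shuffle-based path accumulates into P
// with atomicAdd, so P should be zeroed first.
#ifdef MATKERNEL_EXAMPLES
static int example_dds(int nrows, int nnz, double *dA, double *dB,
                       int *dCir, int *dCic, double *dP) {
  hipMemset(dP, 0, (size_t)nnz * sizeof(double));
  return dds(nrows, nnz, dA, dB, dCir, dCic, dP);
}
#endif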
#if __CUDA_ARCH__ > 200
__global__ void __reduce1op(int nrows, int ncols, double *A, double *B, int opn) {
optype op = operators[opn];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
double v;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
v = 0;
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows];
for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) {
v = op(v, A[i + icol * nrows]);
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = op(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
B[icol] = v;
}
}
}
#else
__global__ void __reduce1op(int nrows, int ncols, double *A, double *B, int opn) {
__shared__ double parts[32][33];
optype op = operators[opn];
double v;
for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
v = 0;
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows];
for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
B[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
int reduce1op(int nrows, int ncols, double *A, double *B, int opn) {
int blkx = 32;
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reduce1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
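// Usage sketch (hypothetical guard/pointers): reduce1op folds each column of the nrows x ncols
// matrix A into B[icol] with the binary operator operators[opn], so B has length ncols. With an
// "add" operator this produces column sums; the index used below is an assumption.
#ifdef MATKERNEL_EXAMPLES
static int example_column_reduce(int nrows, int ncols, double *dA, double *dB) {
  int opn_add = 0;  // assumed index of op_add in operators[]
  return reduce1op(nrows, ncols, dA, dB, opn_add);
}
#endif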
#if __CUDA_ARCH__ > 200
__global__ void __reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
optype opbf = operators[opb];
optype oprf = operators[opr];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
double v = 0;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows]));
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = oprf(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
C[icol] = v;
}
}
}
#else
__global__ void __reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
__shared__ double parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
double v = 0;
for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
C[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
int reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reducebin1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#define BLOCKDIM 32
__global__ void __transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int gridx = min(32, 1+(nrows-1)/256);
int gridy = min(32, 1+(ncols-1)/256);
const dim3 griddims(gridx, gridy, 1);
const dim3 blockdims(BLOCKDIM,16,1);
hipError_t err;
int dev = -1;
hipGetDevice(&dev);
hipLaunchKernelGGL(( __transpose), dim3(griddims),dim3(blockdims), 0, 0, in, instride, out, outstride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
    fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix\n", dev, nrows, ncols);
return err;
}
return 0;
}
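// Usage sketch (hypothetical guard/pointers): out = in^T for a column-major nrows x ncols input,
// using a shared-memory tile padded to BLOCKDIM+1 to avoid bank conflicts. For contiguous
// matrices the strides are simply the leading dimensions: instride = nrows, outstride = ncols.
#ifdef MATKERNEL_EXAMPLES
static int example_transpose(double *d_in, double *d_out, int nrows, int ncols) {
  return transpose(d_in, nrows, d_out, ncols, nrows, ncols);
}
#endif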
__global__ void __reduce2op(int nrows, int ncols, double *A, double *B, int opn) {
__shared__ double parts[32][33];
optype op = operators[opn];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
double v = A[irow + threadIdx.y * nrows];
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
double newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
B[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reduce2op(int nrows, int ncols, double *A, double *B, int opn) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reduce2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __reducebin2op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
__shared__ double parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
double v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]);
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
double newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
C[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reducebin2op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
hipLaunchKernelGGL(( __reducebin2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __embedmat2d(double *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
b[i] = (long long)vi + (((long long)(i/nrows+1))<<32);
}
}
__global__ void __embedmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
c[i] = (long long)vi + (((long long)b[i])<<32);
}
}
int embedmat2d(double *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizesD(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __embedmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int embedmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesD(n, &griddims, &nthreads);
hipLaunchKernelGGL(( __embedmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __extractmat2d(double *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&b[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
}
}
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
}
int extractmat2d(double *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizesD(nrows*ncols, &griddims, &nthreads);
hipLaunchKernelGGL(( __extractmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int extractmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesD(n, &griddims, &nthreads);
hipLaunchKernelGGL(( __extractmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reverse.h>
int fsort2d(double *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) {
for (int i = 0; i < ncols; i++) {
thrust::device_ptr<double> keys(pkeys+i*nrows);
thrust::device_ptr<unsigned int> vals(pvals+i*nrows);
if (asc > 0) {
thrust::sort_by_key(keys, keys + nrows, vals);
} else {
thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<double>());
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int fsort(double *pkeys, int N, int asc) {
thrust::device_ptr<double> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
    thrust::sort(keys, keys + N, thrust::greater<double>());
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int fsorts(double *pkeys, unsigned int *pvals, int *jc, int m, int asc) {
for (int i = 0; i < m; i++) {
thrust::device_ptr<double> keys(pkeys + jc[i]);
thrust::device_ptr<unsigned int> vals(pvals + jc[i]);
int b = jc[i+1] - jc[i];
if (asc > 0) {
thrust::sort_by_key(keys, keys + b, vals);
} else {
thrust::sort_by_key(keys, keys + b, vals, thrust::greater<double>());
}
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
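// Usage sketch (hypothetical guard/pointers): fsorts sorts m independent key/value segments in
// place. Note that jc is dereferenced on the host (jc[i], jc[i+1]), so it is expected to be a
// host-side array of m+1 offsets into the device arrays pkeys/pvals.
#ifdef MATKERNEL_EXAMPLES
static int example_fsorts(double *dkeys, unsigned int *dvals, int *host_jc, int m) {
  return fsorts(dkeys, dvals, host_jc, m, 1);  // ascending within each segment
}
#endif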
// This namespace path may break with newer CUDA/Thrust releases. If so, look for radixsort_api.h
// under /usr/local/cuda/include and adjust the namespace below to match.
using namespace thrust::system::cuda::detail::detail::b40c_thrust;
int fsortsizexD(int N) {
RadixSortingEnactor<double,unsigned int> sorter(N);
return sorter.SpineElements();
}
int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals,
int *ispine, bool * bflags, int nrows, int ncols, int asc) {
int i;
hipError_t err;
RadixSortingEnactor<double,unsigned int> sorter(nrows);
RadixSortStorage<double,unsigned int> storage;
storage.d_spine = ispine;
storage.d_from_alt_storage = bflags;
storage.using_alternate_storage = false;
for (i = 0; i < ncols; i++) {
storage.d_keys = pkeys+i*nrows;
storage.d_values = pvals+i*nrows;
storage.d_alt_keys = tkeys;
storage.d_alt_values = tvals;
if (asc == 0) {
thrust::device_ptr<double> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
hipDeviceSynchronize();
sorter.EnactSort(storage);
hipDeviceSynchronize();
err = hipGetLastError();
if (err > 0) return err;
if (asc == 0) {
thrust::device_ptr<double> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
hipDeviceSynchronize();
if (storage.d_keys == tkeys) {
hipMemcpy(pkeys+i*nrows, tkeys, nrows*sizeof(double), hipMemcpyDeviceToDevice);
}
if (storage.d_values == tvals) {
hipMemcpy(pvals+i*nrows, tvals, nrows*sizeof(unsigned int), hipMemcpyDeviceToDevice);
}
}
return err;
}
__global__ void __stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
__shared__ double ss[32];
__shared__ unsigned int ibin[32];
__shared__ unsigned int ebin[32];
__shared__ unsigned int todo[32];
__shared__ double bins[64][33];
__shared__ unsigned int topush;
int tid = threadIdx.x;
ss[tid] = strata[tid];
ibin[tid] = 0;
for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
int ii = i + tid + blockDim.x * blockIdx.x;
if (tid == 0) topush = 0;
if (ii < n) {
double v = a[ii];
int j = 1;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = j - 32;
int k = atomicInc(&ibin[j], 256);
bins[k][j] = v;
if (k == 31) {
k = atomicInc(&topush, 1024);
todo[k] = j;
}
}
if (ibin[tid] >= 32) {
ebin[tid] = atomicAdd(&bi[tid], 32);
ibin[tid] = ibin[tid] - 32;
}
for (int k = 0; k < topush; k++) {
int j = todo[k];
b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
}
}
ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
for (int j = 0; j < 32; j++) {
if (tid < ibin[j]) {
b[j*stride + ebin[j] + tid] = bins[tid][j];
}
}
}
int stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
hipLaunchKernelGGL(( __stratify), dim3(40),dim3(32), 0, 0, strata, n, a, b, bi, stride);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
double v = a[k];
int j = 0;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
int stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1);
const dim3 griddims(8,1,1);
hipLaunchKernelGGL(( __stratifycounts), dim3(griddims),dim3(blockdims), 0, 0, strata, n, a, bi);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
__global__ void __radixcounts(double *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
double v = a[j];
unsigned char *cv = (unsigned char *)&v;
atomicInc(&ic[cv[digit]], 65536*32767);
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
int radixcounts(double *a, int n, int digit, unsigned int *bi) {
const dim3 blockdims(RNTHREADS,1,1);
const dim3 griddims(32,1,1);
hipLaunchKernelGGL(( __radixcounts), dim3(griddims),dim3(blockdims), 0, 0, a, n, digit, bi);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(double *A, int lda, double *B, int ldb, double *C, \
int ldc, int d, int nrows, int ncols, double p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
double va, vb, vc; \
double R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
  printf("Warning, L1dist not supported on arch <= 200\n");
}
__global__ void __l2dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
int dists(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// hipSetDevice(ithread);
if (p == 0.0f) {
hipLaunchKernelGGL(( __linfdist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
hipLaunchKernelGGL(( __l1dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
hipLaunchKernelGGL(( __l2dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
hipLaunchKernelGGL(( __minkowskidist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
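// Usage sketch (hypothetical guard/pointers): as written, dists treats the rows of A (nrows of
// them) and the rows of B (ncols of them) as d-dimensional points and accumulates pairwise
// distances into the nrows x ncols matrix C. p selects the metric: 0 -> L-infinity, 1 -> L1,
// 2 -> squared L2 (no final square root), otherwise Minkowski with exponent p. C is accumulated
// into, so initialize it first.
#ifdef MATKERNEL_EXAMPLES
static int example_sq_l2_dists(double *dA, double *dB, double *dC,
                               int d, int nrows, int ncols) {
  hipMemset(dC, 0, (size_t)nrows * ncols * sizeof(double));
  return dists(dA, nrows, dB, ncols, dC, nrows, d, nrows, ncols, 2.0);
}
#endif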
int maxsumx(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
hipLaunchKernelGGL(( __msum), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
__shared__ T tots[32];
int start, end, ij;
  int bid = blockIdx.y + blockIdx.z * gridDim.y;  // column index
T sum, tsum, tmp, ttot, ttot0;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
start = jc[ij] + bid * nrows;
end = jc[ij+1] + bid * nrows;
sum = 0;
for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
tsum = in[i];
tmp = __shfl_up(tsum, 1);
if (threadIdx.x >= 1) tsum += tmp;
tmp = __shfl_up(tsum, 2);
if (threadIdx.x >= 2) tsum += tmp;
tmp = __shfl_up(tsum, 4);
if (threadIdx.x >= 4) tsum += tmp;
tmp = __shfl_up(tsum, 8);
if (threadIdx.x >= 8) tsum += tmp;
tmp = __shfl_up(tsum, 16);
if (threadIdx.x >= 16) tsum += tmp;
ttot = __shfl(tsum, min(end-start-1, 31));
ttot0 = ttot;
__syncthreads();
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
__syncthreads();
for (int k = 1; k < blockDim.y; k *= 2) {
if (threadIdx.y >= k) {
if (threadIdx.x == threadIdx.y - k) {
ttot += tots[threadIdx.x];
}
}
__syncthreads();
if (threadIdx.y >= k) {
ttot = __shfl(ttot, threadIdx.y - k);
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
}
__syncthreads();
}
out[i] = sum + tsum + ttot - ttot0;
if (threadIdx.x == blockDim.y - 1) {
ttot = tots[threadIdx.x];
}
__syncthreads();
ttot = __shfl(ttot, blockDim.y - 1);
sum += ttot;
}
}
}
}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k, start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
vmax = maxminv;
imax = -1;
start = jc[ij];
end = jc[ij+1];
for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[ij + m * bid] = vmax;
outi[ij + m * bid] = imax;
}
}
}
}
}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
if (bid < ncols) {
vmax = maxminv;
imax = -1;
for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[bid] = vmax;
outi[bid] = imax;
}
}
__syncthreads();
}
}
// Not very fast for wide matrices
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
T vmax, vtmp;
int imax, itmp, i, j;
for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) {
if (ncols > 0) {
vmax = in[i];
imax = 0;
for (j = 1; j < ncols; j++) {
vtmp = in[i + nrows * j];
itmp = j;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
out[i] = vmax;
outi[i] = imax;
}
}
}
#else
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {}
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {}
#endif
void setindsD(int ncols, int &nc1, int &nc2) {
if (ncols < 65536) {
nc1 = ncols;
nc2 = 1;
} else {
nc1 = (int)sqrt((double)ncols);
nc2 = 1 + (ncols-1)/nc1;
}
}
template<class T>
int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __cumsumg<T>), dim3(grid),dim3(tblock), 0, 0, in, out, jc, nrows, ncols, m);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int cumsumgf(double *in, double *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<double>(in, out, jc, nrows, ncols, m);
}
template<class T>
int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxming<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, jc, nrows, ncols, m, minv, dir);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
// JFC: potential problem here when ncols is not a multiple of 16 and nrows < 32.
template<class T>
int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(nc1, nc2, 1);
int ny = min(32, 1+nrows/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxmini_cols<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, minv, dir);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
template<class T>
int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
int nb = min(32,1+nrows/32);
dim3 grid(nb,1,1);
int ny = min(32, 1+nrows/nb/32);
dim3 tblock(32, ny, 1);
hipLaunchKernelGGL(( __maxmini_rows<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, dir);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int maxgf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<double>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1);
}
int mingf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<double>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0);
}
int maxif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<double>(in, out, outi, nrows, ncols, -3e38f, 1);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int minif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<double>(in, out, outi, nrows, ncols, 3e38f, 0);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
__global__ void __dmv(double *a, int nrows, int ncols, double *b, double *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
double accum = 0.0;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
double accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
double accum = 0.0;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
__global__ void __dmv0(double *a, int nrows, int ncols, int tstep, double *b, double *c) {
double accum = 0.0f;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(double *a, int nrows, int ncols, double *b, double *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
hipLaunchKernelGGL(( __dmvt), dim3(griddims),dim3(blockdims), 0, 0, a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
hipLaunchKernelGGL(( __dmv0), dim3(nbx),dim3(ntx), 0, 0, a, nrows, ncols, tstep, b, c);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
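// Usage sketch (hypothetical guard/pointers): dmv computes c += A*b (trans == 0, c has length
// nrows) or c += A^T*b (trans == 1, c has length ncols) for a column-major nrows x ncols matrix,
// combining partial sums with atomicAdd, so c must be zeroed beforehand.
#ifdef MATKERNEL_EXAMPLES
static int example_dmv(double *dA, int nrows, int ncols, double *db, double *dc, int trans) {
  hipMemset(dc, 0, (size_t)(trans ? ncols : nrows) * sizeof(double));
  return dmv(dA, nrows, ncols, db, dc, trans);
}
#endif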
#define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \
__global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \
int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \
int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \
istart = (istart / 32) * 32; \
if (blockIdx.x != gridDim.x - 1) { \
iend = (iend / 32) * 32; \
} \
for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \
atomicAdd(&S[II + nrows * IJ], IV); \
} \
} \
int accum(TI, TJ, TV, TS, int m, int nrows) { \
int nthreads = max(32, min(512, m)); \
int nblocks = max(1, min(65535, m/nthreads/8)); \
hipLaunchKernelGGL(( __accum), dim3(nblocks),dim3(nthreads), 0, 0, I,J,V,S,m,nrows); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
ACCUM_KERNEL(int*I, int*J, double*V, double*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, double*V, double*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, double*V, double*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, double V, double*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, double V, double*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, double V, double*S, I, J[i], V)
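// Usage sketch (hypothetical guard/pointers): the ACCUM_KERNEL instantiations above generate
// overloads of accum() that scatter-add into a dense column-major matrix S:
// S[I[i] + nrows*J[i]] += V[i] for i < m, with a scalar I, J or V broadcast in the overloads
// that take them by value. S is accumulated into, so zero it first.
#ifdef MATKERNEL_EXAMPLES
static int example_accum(int *dI, int *dJ, double *dV, double *dS,
                         int m, int nrows, int ncols) {
  hipMemset(dS, 0, (size_t)nrows * ncols * sizeof(double));
  return accum(dI, dJ, dV, dS, m, nrows);
}
#endif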
const int INBLOCK = 4;
// Copy and transpose selected columns of the input matrix into the output matrix. nrows refers to
// the input matrix (and is therefore ncols of the output). ncols is the length of the iptrs array,
// which becomes the number of rows of the output matrix. iptrs specifies which columns of the
// input array to copy. outstride is the stride (leading dimension) of the output matrix.
__global__ void __icopy_transpose(int *iptrs, double *in, double *out, int outstride, int nrows, int ncols) {
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int icopy_transpose(int *iptrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __icopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, iptrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in icopy_transpose\n"); return err;}
return 0;
}
// copy and transpose the input matrix into columns of the output matrix. nrows, ncols refer to output matrix
__global__ void __ocopy_transpose(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb];
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_add(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_min(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb]));
}
}
__syncthreads();
}
}
}
int ocopy_transpose_add(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose_add), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_add\n"); return err;}
return 0;
}
int ocopy_transpose(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose\n"); return err;}
return 0;
}
int ocopy_transpose_min(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
hipError_t err;
hipLaunchKernelGGL(( __ocopy_transpose_min), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols);
hipDeviceSynchronize();
err = hipGetLastError();
  if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_min\n"); return err;}
return 0;
}
#ifdef TEST
int main(int argc, char **argv) {
int m=8, n=8, opn = 0;
double *dA, *dB, *dC, *A, *B, *C;
if (argc > 1) {
sscanf(argv[1], "%d", &opn);
if (argc > 2) {
sscanf(argv[2], "%d", &m);
if (argc > 3) {
sscanf(argv[3], "%d", &n);
}
}
}
A = (double *)malloc(m*n*sizeof(double));
B = (double *)malloc(m*n*sizeof(double));
C = (double *)malloc(m*n*sizeof(double));
hipMalloc((void**)&dA, m*n*sizeof(double));
hipMalloc((void**)&dB, m*n*sizeof(double));
hipMalloc((void**)&dC, m*n*sizeof(double));
for (int i = 0; i < m*n; i++) {
A[i] = 1.0f;
B[i] = 2.0f;
}
hipMemcpy(dA, A, m*n*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dB, B, m*n*sizeof(double), hipMemcpyHostToDevice);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
MatKernel(dA, m, n, dB, m, n, dC, opn);
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
    fprintf(stderr, "CUDA error %d\n", err);
exit(1);
}
hipMemcpy(C, dC, m*n*sizeof(double), hipMemcpyDeviceToHost);
printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
  if (dA != NULL) hipFree(dA);
  if (dB != NULL) hipFree(dB);
  if (dC != NULL) hipFree(dC);
  if (C != NULL) free(C);
  if (B != NULL) free(B);
  if (A != NULL) free(A);
  return 0;
}
#endif
// Cumulative sum of columns
#if __CUDA_ARCH__ >= 300
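// Each 32-wide warp (one per threadIdx.y) scans one column in blockDim.x-sized chunks with __shfl_up,
// carrying the running column total forward in 'sum'.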
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
int i, j, k, lim;
double v, w, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
sum = 0.0f;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
w = __shfl_up(v, k);
if (threadIdx.x >= k) {
v += w;
}
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
sum = __shfl(v, blockDim.x - 1);
}
}
}
#else
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
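  // Fallback scan through shared memory for devices without __shfl. Note that 'buff' is indexed
  // only by threadIdx.x, so this path is only safe when a single row of threads (blockDim.y == 1) is used.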
__shared__ double buff[32];
int i, j, k, lim;
double v, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
sum = 0.0f;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
__syncthreads();
buff[threadIdx.x] = v;
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
__syncthreads();
if (threadIdx.x >= k) {
v += buff[threadIdx.x - k];
}
__syncthreads();
buff[threadIdx.x] = v;
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
__syncthreads();
sum = buff[31];
__syncthreads();
}
}
}
#endif
int cumsumc(int nrows, int ncols, double *A, double *B) {
if (ncols == 1) {
thrust::device_ptr<double> pa(A);
thrust::device_ptr<double> pb(B);
thrust::inclusive_scan(pa, pa + nrows, pb);
} else {
dim3 threads;
threads.x = 32;
threads.y = min(32, ncols);
int nblocks = min(64, 1 + (ncols-1)/threads.y);
hipLaunchKernelGGL(( __cumsumc), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, A, B);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
| 72e474e86dc427f2a70649aff404eaaf29c84591.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <MatKernel.hpp>
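// Double-precision atomicAdd emulated with atomicCAS; CUDA provides a native double atomicAdd
// only on devices of compute capability 6.0 or later.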
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
__device__ double op_add(double a, double b) {return a+b;}
__device__ double op_sub(double a, double b) {return a-b;}
__device__ double op_mul(double a, double b) {return a*b;}
__device__ double op_div(double a, double b) {return a/b;}
__device__ double op_gt(double a, double b) {return (a > b) ? 1.0f : 0;}
__device__ double op_lt(double a, double b) {return (a < b) ? 1.0f : 0;}
__device__ double op_eq(double a, double b) {return (a == b) ? 1.0f : 0;}
__device__ double op_ge(double a, double b) {return (a >= b) ? 1.0f : 0;}
__device__ double op_le(double a, double b) {return (a <= b) ? 1.0f : 0;}
__device__ double op_ne(double a, double b) {return (a != b) ? 1.0f : 0;}
__device__ double op_max(double a, double b) {return max(a,b);}
__device__ double op_min(double a, double b) {return min(a,b);}
__device__ double op_atan2(double a, double b) {return atan2f(a, b);}
__device__ double op_pow(double a, double b) {return powf(a, b);}
typedef double (*optype)(double,double);
__device__ const optype operators[] = {
op_add,
op_sub,
op_mul,
op_div,
op_gt,
op_lt,
op_eq,
op_ge,
op_le,
op_ne,
op_max,
op_min,
op_atan2,
op_pow};
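// Single-argument math wrappers. Note that these call the single-precision intrinsics
// (expf, logf, ...) on double arguments, so the results are computed in float precision.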
__device__ double fn_abs(double a) {return abs(a);}
__device__ double fn_exp(double a) {return expf(a);}
__device__ double fn_log(double a) {return logf(a);}
__device__ double fn_expm1(double a) {return expm1f(a);}
__device__ double fn_sqrt(double a) {return sqrtf(a);}
__device__ double fn_ln(double a) {return logf(a);}
__device__ double fn_log10(double a) {return log10f(a);}
__device__ double fn_log1p(double a) {return log1pf(a);}
__device__ double fn_cos(double a) {return cosf(a);}
__device__ double fn_sin(double a) {return sinf(a);}
__device__ double fn_tan(double a) {return tanf(a);}
__device__ double fn_cosh(double a) {return coshf(a);}
__device__ double fn_sinh(double a) {return sinhf(a);}
__device__ double fn_tanh(double a) {return tanhf(a);}
__device__ double fn_acos(double a) {return acosf(a);}
__device__ double fn_asin(double a) {return asinf(a);}
__device__ double fn_atan(double a) {return atanf(a);}
__device__ double fn_acosh(double a) {return acoshf(a);}
__device__ double fn_asinh(double a) {return asinhf(a);}
__device__ double fn_atanh(double a) {return atanhf(a);}
__device__ double fn_erf(double a) {return erff(a);}
__device__ double fn_erfinv(double a) {return erfinvf(a);}
__device__ double fn_erfc(double a) {return erfcf(a);}
__device__ double fn_erfcinv(double a) {return erfcinvf(a);}
__device__ double fn_gammaln(double a) {return lgammaf(a);}
__device__ double fn_gamma(double a) {return tgammaf(a);}
__device__ double fn_ceil(double a) {return ceilf(a);}
__device__ double fn_floor(double a) {return floorf(a);}
__device__ double fn_round(double a) {return roundf(a);}
__device__ double fn_trunc(double a) {return truncf(a);}
__device__ double fn_sign(double a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ double fn_j0(double a) {return j0f(a);}
__device__ double fn_j1(double a) {return j1f(a);}
//__device__ double fn_jn(double a) {return jnf(a);}
__device__ double fn_y0(double a) {return y0f(a);}
__device__ double fn_y1(double a) {return y1f(a);}
//__device__ double fn_yn(double a) {return ynf(a);}
__device__ double fn_exppsi(double a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ double fn_atan2(double a, double b) {return atan2f(a, b);}
__device__ double fn_pow(double a, double b) {return powf(a, b);}
typedef double (*fntype)(double);
__device__ const fntype fctns[35] = {
fn_abs,
fn_exp,
fn_expm1,
fn_sqrt,
fn_ln,
fn_log10,
fn_log1p,
fn_cos,
fn_sin,
fn_tan,
fn_cosh,
fn_sinh,
fn_tanh,
fn_acos,
fn_asin,
fn_atan,
fn_acosh,
fn_asinh,
fn_atanh,
fn_erf,
fn_erfinv,
fn_erfc,
fn_erfcinv,
fn_gammaln,
fn_gamma,
fn_ceil,
fn_floor,
fn_round,
fn_trunc,
fn_sign,
fn_j0,
fn_j1,
fn_y0,
fn_y1,
fn_exppsi};
__device__ const optype fctns2[2] = {
fn_atan2,
fn_pow};
__global__ void __apply_gfun(double *A, double *B, int N, int opn) {
fntype fn = fctns[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = fn(A[i]);
}
}
void setsizesD(int N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
while (nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < 1024) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
int apply_gfun(double *A, double *B, int N, int opn) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
__apply_gfun<<<griddims,nthreads>>>(A, B, N, opn);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
template <class T>
__global__ void __toDouble(T *A, double *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (double)(A[i]);
}
}
__global__ void __toInt(double *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
int IntToDouble(int *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
__toDouble<int><<<griddims,nthreads>>>(A, B, N);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int FloatToDouble(float *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
__toDouble<float><<<griddims,nthreads>>>(A, B, N);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int toInt(double *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
__toInt<<<griddims,nthreads>>>(A, B, N);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int i, row, col;
double v;
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (i = id; i < nnz; i += blockDim.x * gridDim.x) {
v = data[i];
row = ir[i];
col = ic[i];
od[row + col * nrows] = v;
}
}
int full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) {
int nblocks = min(32, 1+(nnz-1)/32);
int nthreads = max(32, min(1+(nnz-1)/nblocks, 1024));
__full<<<nblocks,nthreads>>>(ir, ic, data, od, nrows, ncols, nnz);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_gfun2(double *A, double *B, double *C, int N, int opn) {
optype fn = fctns2[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = fn(A[i], B[i]);
}
}
int apply_gfun2(double *A, double *B, double *C, int N, int opn) {
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
__apply_gfun2<<<griddims,nthreads>>>(A, B, C, N, opn);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __apply_full(double *A, double *B, double *C, int N, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i]);
}
}
__global__ void __apply_right_col(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i % nrows]);
}
}
__global__ void __apply_right_row(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],B[i / nrows]);
}
}
__global__ void __apply_left_col(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i % nrows],B[i]);
}
}
__global__ void __apply_left_row(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i / nrows],B[i]);
}
}
__global__ void __apply_right_val(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
double val = B[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(A[i],val);
}
}
__global__ void __apply_left_val(double *A, double *B, double *C, int nrows, int ncols, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
double val = A[0];
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
C[i] = op(val,B[i]);
}
}
__global__ void __set_val(double *A, double val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
int set_val(double *A, double val, int length) {
int nthreads;
dim3 griddims;
setsizesD(length, &griddims, &nthreads);
__set_val<<<griddims,nthreads>>>(A, val, length);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int set_ival(double *A, int val, int length) {
int nthreads;
dim3 griddims;
setsizesD(length, &griddims, &nthreads);
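  // Note: *((double *)&val) below reinterprets the address of a 32-bit int as a pointer to double,
  // so 8 bytes are read from a 4-byte argument; the upper half of the broadcast pattern is unspecified.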
__set_val<<<griddims,nthreads>>>(A, *((double *)&val), length);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int apply_binop(double *A, int Anrows, int Ancols,
double *B, int Bnrows, int Bncols, double *C, int opn) {
int N = max(Anrows, Bnrows)*max(Ancols, Bncols);
int nthreads;
dim3 griddims;
setsizesD(N, &griddims, &nthreads);
if (Anrows == Bnrows && Ancols == Bncols) {
__apply_full<<<griddims,nthreads>>>(A, B, C, N, opn);
} else if (Anrows == Bnrows && Bncols == 1) {
__apply_right_col<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Ancols == Bncols && Bnrows == 1) {
__apply_right_row<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == Bnrows && Ancols == 1) {
__apply_left_col<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Ancols == Bncols && Anrows == 1) {
__apply_left_row<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
} else if (Bnrows == 1 && Bncols == 1) {
__apply_right_val<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn);
} else if (Anrows == 1 && Ancols == 1) {
__apply_left_val<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __sdoprow(int nrows, int ncols, int nnz, double *A, int *Aic, double *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int col = Aic[i];
double oldA = A[i];
A[i] = op(oldA,B[col]);
}
}
__global__ void __sdopcol(int nrows, int ncols, int nnz, double *A, int *Air, double *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
int row = Air[i];
double oldA = A[i];
A[i] = op(oldA,B[row]);
}
}
__global__ void __sdopval(int nnz, double *A, double *B, int opn) {
optype op = operators[opn];
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
double bval = B[0];
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) {
double oldA = A[i];
A[i] = op(oldA,bval);
}
}
int sdoprow(int nrows, int ncols, int nnz, double *A, int *Aic,
double *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
__sdoprow<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Aic, B, opn);
} else {
__sdopval<<<griddims,nthreads>>>(nnz, A, B, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int sdopcol(int nrows, int ncols, int nnz, double *A, int *Air,
double *B, int len, int opn) {
int nthreads;
dim3 griddims;
setsizes(nnz, &griddims, &nthreads);
if (len > 1) {
__sdopcol<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Air, B, opn);
} else {
__sdopval<<<griddims,nthreads>>>(nnz, A, B, opn);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B[I,J] = A
// indexed copy: version with one block per column
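// Kernel name suffixes: 'n' means the index array is used for that dimension,
// 'x' means the dimension is addressed directly (NULL index).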
#define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2D##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A[i + iblock * lda]; \
} \
} \
}
COPYTOINDS2DA(nn,I[i],J[iblock])
COPYTOINDS2DA(xn,i,J[iblock])
COPYTOINDS2DA(nx,I[i],iblock)
COPYTOINDS2DA(xx,i,iblock)
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR) \
__global__ void __copyToInds2DB##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \
} \
}
COPYTOINDS2DB(nn,I[irow],J[icol])
COPYTOINDS2DB(xn,irow,J[icol])
COPYTOINDS2DB(nx,I[irow],icol)
COPYTOINDS2DB(xx,irow,icol)
// Implement B[I,J] = A
int copyToInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((double)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyToInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyToInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B = A[I,J]
// indexed copy: version with one block per column
#define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2D##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[i + iblock * ldb] = A[IEXPR + icol * lda]; \
} \
} \
}
COPYFROMINDS2DA(nn,I[i],J[iblock])
COPYFROMINDS2DA(xn,i,J[iblock])
COPYFROMINDS2DA(nx,I[i],iblock)
COPYFROMINDS2DA(xx,i,iblock)
// Implement B = A[I,J]
// indexed copy: version with one thread per element
#define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR) \
__global__ void __copyFromInds2DB##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \
} \
}
COPYFROMINDS2DB(nn,I[irow],J[icol])
COPYFROMINDS2DB(xn,irow,J[icol])
COPYFROMINDS2DB(nx,I[irow],icol)
COPYFROMINDS2DB(xx,irow,icol)
// Implement B = A[I,J]
int copyFromInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
    int vs = (int)sqrt((double)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
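// Sparse-dense product: dense A times sparse B given as (Bdata, Bir, Bic) triples sorted by column.
// Each block walks a slice of the nonzeros, accumulating a partial column sum per row of A and
// flushing it into C with atomicAdd at every column boundary.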
__global__ void __dsmult(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
int dsmult(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsmult_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreads) {
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __dsmultT(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
double aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultTx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmultT<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B, int n) {
int nthreads = max(32, min(128, nnz));
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
__spsum1<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
} else {
__spsum2<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P);
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P);
__global__ void __reduce1op(int nrows, int ncols, double *A, double *B, int opn);
__global__ void __reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr);
#define DDS_BLKY 32
#if __CUDA_ARCH__ > 200
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {
__shared__ double merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
double user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
__shared__ double parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
double sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {}
#endif
int dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
__dds<<<nblocks,blockDims>>>(nrows, nnz, A, B, Cir, Cic, P);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
__dds0<<<nblocks,blockDims>>>(nrows, ncols, A, B, Cir, Cic, P);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
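// reduce1op: reduces each column of A with the binary operator opn, writing one value per column to B.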
#if __CUDA_ARCH__ > 200
__global__ void __reduce1op(int nrows, int ncols, double *A, double *B, int opn) {
optype op = operators[opn];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
double v;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
v = 0;
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows];
for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) {
v = op(v, A[i + icol * nrows]);
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = op(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
B[icol] = v;
}
}
}
#else
__global__ void __reduce1op(int nrows, int ncols, double *A, double *B, int opn) {
__shared__ double parts[32][33];
optype op = operators[opn];
double v;
for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
v = 0;
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows];
for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
B[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
int reduce1op(int nrows, int ncols, double *A, double *B, int opn) {
int blkx = 32;
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reduce1op<<<nblks,blkdims>>>(nrows, ncols, A, B, opn);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
__global__ void __reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
optype opbf = operators[opb];
optype oprf = operators[opr];
int basecol = threadIdx.y + blockDim.y * blockIdx.x;
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) {
double v = 0;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows]));
}
for (int i = 1; i < blockDim.x; i *= 2) {
v = oprf(v, __shfl_down(v, i));
}
if (threadIdx.x == 0) {
C[icol] = v;
}
}
}
#else
__global__ void __reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
__shared__ double parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) {
double v = 0;
for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
for (int i = 1; i < blockDim.x; i *= 2) {
if (i + threadIdx.x < blockDim.x) {
parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]);
}
}
if (threadIdx.x == 0) {
C[icol] = parts[0][threadIdx.y];
}
__syncthreads();
}
}
#endif
int reducebin1op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reducebin1op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#define BLOCKDIM 32
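// Tiled transpose through a BLOCKDIM x BLOCKDIM shared tile; the extra padding column avoids
// shared-memory bank conflicts.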
__global__ void __transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) {
int gridx = min(32, 1+(nrows-1)/256);
int gridy = min(32, 1+(ncols-1)/256);
const dim3 griddims(gridx, gridy, 1);
const dim3 blockdims(BLOCKDIM,16,1);
cudaError_t err;
int dev = -1;
cudaGetDevice(&dev);
__transpose<<<griddims,blockdims>>>(in, instride, out, outstride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
    fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix\n", dev, nrows, ncols);
return err;
}
return 0;
}
__global__ void __reduce2op(int nrows, int ncols, double *A, double *B, int opn) {
__shared__ double parts[32][33];
optype op = operators[opn];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
double v = A[irow + threadIdx.y * nrows];
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = op(v, A[irow + icol * nrows]);
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
double newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
B[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reduce2op(int nrows, int ncols, double *A, double *B, int opn) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reduce2op<<<nblks,blkdims>>>(nrows, ncols, A, B, opn);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __reducebin2op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
__shared__ double parts[32][33];
optype opbf = operators[opb];
optype oprf = operators[opr];
int baserow = threadIdx.x + blockDim.x * blockIdx.x;
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) {
double v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]);
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) {
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows]));
}
parts[threadIdx.x][threadIdx.y] = v;
__syncthreads();
double newv = 0;
for (int i = 1; i < blockDim.y; i *= 2) {
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y];
__syncthreads();
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv);
__syncthreads();
}
if (threadIdx.y == 0) {
C[irow] = parts[threadIdx.x][0];
}
__syncthreads();
}
}
int reducebin2op(int nrows, int ncols, double *A, double *B, double *C, int opb, int opr) {
int blkx = min(32, nrows);
int blky = min(32, ncols);
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16))));
const dim3 blkdims(blkx,blky,1);
__reducebin2op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
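// Packs each element into a 64-bit key: the low word is a 32-bit reinterpretation of the value,
// with negative values mapped to -(magnitude) so that integer ordering matches floating-point
// ordering, and the high word holds the 1-based column index. Only 32 bits of each double are captured.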
__global__ void __embedmat2d(double *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
b[i] = (long long)vi + (((long long)(i/nrows+1))<<32);
}
}
__global__ void __embedmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
int vi = *((int *)&v);
if (vi & signbit) {
vi = -(vi & mag);
}
c[i] = (long long)vi + (((long long)b[i])<<32);
}
}
int embedmat2d(double *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizesD(nrows*ncols, &griddims, &nthreads);
__embedmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int embedmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesD(n, &griddims, &nthreads);
__embedmat<<<griddims,nthreads>>>(a, b, c, n);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __extractmat2d(double *a, long long *b, int nrows, int ncols) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&b[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
}
}
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
}
int extractmat2d(double *a, long long *b, int nrows, int ncols) {
int nthreads;
dim3 griddims;
setsizesD(nrows*ncols, &griddims, &nthreads);
__extractmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int extractmat(double *a, int *b, long long *c, int n) {
int nthreads;
dim3 griddims;
setsizesD(n, &griddims, &nthreads);
__extractmat<<<griddims,nthreads>>>(a, b, c, n);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reverse.h>
int fsort2d(double *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) {
for (int i = 0; i < ncols; i++) {
thrust::device_ptr<double> keys(pkeys+i*nrows);
thrust::device_ptr<unsigned int> vals(pvals+i*nrows);
if (asc > 0) {
thrust::sort_by_key(keys, keys + nrows, vals);
} else {
thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<double>());
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int fsort(double *pkeys, int N, int asc) {
thrust::device_ptr<double> keys(pkeys);
if (asc > 0) {
thrust::sort(keys, keys + N);
} else {
    thrust::sort(keys, keys + N, thrust::greater<double>());
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int fsorts(double *pkeys, unsigned int *pvals, int *jc, int m, int asc) {
for (int i = 0; i < m; i++) {
thrust::device_ptr<double> keys(pkeys + jc[i]);
thrust::device_ptr<unsigned int> vals(pvals + jc[i]);
int b = jc[i+1] - jc[i];
if (asc > 0) {
thrust::sort_by_key(keys, keys + b, vals);
} else {
thrust::sort_by_key(keys, keys + b, vals, thrust::greater<double>());
}
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// This path may break. If so look for radixsort_api.h in /usr/local/cuda/include
// and fix the path below.
using namespace thrust::system::cuda::detail::detail::b40c_thrust;
int fsortsizexD(int N) {
RadixSortingEnactor<double,unsigned int> sorter(N);
return sorter.SpineElements();
}
int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals,
int *ispine, bool * bflags, int nrows, int ncols, int asc) {
int i;
  cudaError_t err = cudaSuccess;
RadixSortingEnactor<double,unsigned int> sorter(nrows);
RadixSortStorage<double,unsigned int> storage;
storage.d_spine = ispine;
storage.d_from_alt_storage = bflags;
storage.using_alternate_storage = false;
for (i = 0; i < ncols; i++) {
storage.d_keys = pkeys+i*nrows;
storage.d_values = pvals+i*nrows;
storage.d_alt_keys = tkeys;
storage.d_alt_values = tvals;
if (asc == 0) {
thrust::device_ptr<double> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
cudaDeviceSynchronize();
sorter.EnactSort(storage);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err > 0) return err;
if (asc == 0) {
thrust::device_ptr<double> keys(storage.d_keys);
thrust::device_ptr<unsigned int> vals(storage.d_values);
thrust::reverse(keys, keys+nrows);
thrust::reverse(vals, vals+nrows);
}
cudaDeviceSynchronize();
if (storage.d_keys == tkeys) {
cudaMemcpy(pkeys+i*nrows, tkeys, nrows*sizeof(double), cudaMemcpyDeviceToDevice);
}
if (storage.d_values == tvals) {
cudaMemcpy(pvals+i*nrows, tvals, nrows*sizeof(unsigned int), cudaMemcpyDeviceToDevice);
}
}
return err;
}
__global__ void __stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
__shared__ double ss[32];
__shared__ unsigned int ibin[32];
__shared__ unsigned int ebin[32];
__shared__ unsigned int todo[32];
__shared__ double bins[64][33];
__shared__ unsigned int topush;
int tid = threadIdx.x;
ss[tid] = strata[tid];
ibin[tid] = 0;
for (int i = 0; i < n; i += blockDim.x * gridDim.x) {
int ii = i + tid + blockDim.x * blockIdx.x;
if (tid == 0) topush = 0;
if (ii < n) {
double v = a[ii];
int j = 1;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = (v > ss[j-1]) ? 2*j+1 : 2*j;
j = j - 32;
int k = atomicInc(&ibin[j], 256);
bins[k][j] = v;
if (k == 31) {
k = atomicInc(&topush, 1024);
todo[k] = j;
}
}
if (ibin[tid] >= 32) {
ebin[tid] = atomicAdd(&bi[tid], 32);
ibin[tid] = ibin[tid] - 32;
}
for (int k = 0; k < topush; k++) {
int j = todo[k];
b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j];
}
}
ebin[tid] = atomicAdd(&bi[tid], ibin[tid]);
for (int j = 0; j < 32; j++) {
if (tid < ibin[j]) {
b[j*stride + ebin[j] + tid] = bins[tid][j];
}
}
}
int stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) {
__stratify<<<40,32>>>(strata, n, a, b, bi, stride);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#define SNDVALS 256
#define SNDGRPS 4
#define SNTHREADS 1024
#define SBIGBLK (4*1024)
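// Counts, per SBIGBLK-sized chunk, how many elements of 'a' fall into each of the SNDVALS strata;
// the stratum of a value is found by walking an implicit binary tree stored in 'strata'.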
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
double v = a[k];
int j = 0;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
int stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1);
const dim3 griddims(8,1,1);
__stratifycounts<<<griddims,blockdims>>>(strata, n, a, bi);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#define RNDVALS 256
#define RNTHREADS 256
#define RNDBITS 8
#define RBIGBLK (4*1024)
__global__ void __radixcounts(double *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
double v = a[j];
unsigned char *cv = (unsigned char *)&v;
atomicInc(&ic[cv[digit]], 65536*32767);
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
int radixcounts(double *a, int n, int digit, unsigned int *bi) {
const dim3 blockdims(RNTHREADS,1,1);
const dim3 griddims(32,1,1);
__radixcounts<<<griddims,blockdims>>>(a, n, digit, bi);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
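// GENDISTS expands to a register-blocked all-pairs kernel: each thread keeps a 32-element strip of
// the C tile in registers R00..R31, and B values are rotated around the warp with __shfl so that
// every thread combines its row of A with every column of B in the tile.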
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(double *A, int lda, double *B, int ldb, double *C, \
int ldc, int d, int nrows, int ncols, double p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
double va, vb, vc; \
double R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
  printf("Warning, L1dist not supported on arch <= 200\n");
}
__global__ void __l2dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
int dists(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// cudaSetDevice(ithread);
if (p == 0.0f) {
__linfdist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
__l1dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
__l2dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
__minkowskidist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int maxsumx(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
__msum<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
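// Segmented cumulative sum: jc[0..m] gives segment boundaries within each column. Each segment is
// scanned with warp shuffles and the per-warp totals are combined through the 'tots' shared buffer.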
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
__shared__ T tots[32];
int start, end, ij;
int bid = blockIdx.y + blockIdx.z * blockDim.y; // column index
T sum, tsum, tmp, ttot, ttot0;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
start = jc[ij] + bid * nrows;
end = jc[ij+1] + bid * nrows;
sum = 0;
for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
tsum = in[i];
tmp = __shfl_up(tsum, 1);
if (threadIdx.x >= 1) tsum += tmp;
tmp = __shfl_up(tsum, 2);
if (threadIdx.x >= 2) tsum += tmp;
tmp = __shfl_up(tsum, 4);
if (threadIdx.x >= 4) tsum += tmp;
tmp = __shfl_up(tsum, 8);
if (threadIdx.x >= 8) tsum += tmp;
tmp = __shfl_up(tsum, 16);
if (threadIdx.x >= 16) tsum += tmp;
ttot = __shfl(tsum, min(end-start-1, 31));
ttot0 = ttot;
__syncthreads();
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
__syncthreads();
for (int k = 1; k < blockDim.y; k *= 2) {
if (threadIdx.y >= k) {
if (threadIdx.x == threadIdx.y - k) {
ttot += tots[threadIdx.x];
}
}
__syncthreads();
if (threadIdx.y >= k) {
ttot = __shfl(ttot, threadIdx.y - k);
if (threadIdx.x == threadIdx.y) {
tots[threadIdx.y] = ttot;
}
}
__syncthreads();
}
out[i] = sum + tsum + ttot - ttot0;
if (threadIdx.x == blockDim.y - 1) {
ttot = tots[threadIdx.x];
}
__syncthreads();
ttot = __shfl(ttot, blockDim.y - 1);
sum += ttot;
}
}
}
}
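// Segmented max/min with index tracking: dir selects max (nonzero) or min (zero); the winning value
// and its index are reduced first across the warp, then across warps via shared memory.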
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k, start, end, ij;
int bid = blockIdx.y + blockIdx.z * gridDim.y;
if (bid < ncols) {
for (ij = blockIdx.x; ij < m; ij += gridDim.x) {
vmax = maxminv;
imax = -1;
start = jc[ij];
end = jc[ij+1];
for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[ij + m * bid] = vmax;
outi[ij + m * bid] = imax;
}
}
}
}
}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) {
__shared__ T maxv[32];
__shared__ int maxi[32];
T vmax, vtmp;
int imax, itmp, i, k;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
if (bid < ncols) {
vmax = maxminv;
imax = -1;
for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) {
vtmp = in[i + nrows * bid];
itmp = i;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
for (k = 1; k < blockDim.x; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
vmax = __shfl(vmax, blockDim.x - 1);
imax = __shfl(imax, blockDim.x - 1);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
maxv[threadIdx.y] = vmax;
maxi[threadIdx.y] = imax;
}
__syncthreads();
if (threadIdx.y == 0) {
vmax = maxv[threadIdx.x];
imax = maxi[threadIdx.x];
}
__syncthreads();
if (threadIdx.y == 0) {
for (k = 1; k < blockDim.y; k *= 2) {
vtmp = __shfl_up(vmax, k);
itmp = __shfl_up(imax, k);
if (threadIdx.x >= k) {
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
}
if (threadIdx.x == blockDim.y - 1) {
out[bid] = vmax;
outi[bid] = imax;
}
}
__syncthreads();
}
}
// Not very fast for wide matrices
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
T vmax, vtmp;
int imax, itmp, i, j;
for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) {
if (ncols > 0) {
vmax = in[i];
imax = 0;
for (j = 1; j < ncols; j++) {
vtmp = in[i + nrows * j];
itmp = j;
if (dir ? (vtmp > vmax) : (vtmp < vmax)) {
vmax = vtmp;
imax = itmp;
}
}
out[i] = vmax;
outi[i] = imax;
}
}
}
#else
template<class T>
__global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {}
template<class T>
__global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {}
template<class T>
__global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {}
template<class T>
__global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {}
#endif
void setindsD(int ncols, int &nc1, int &nc2) {
if (ncols < 65536) {
nc1 = ncols;
nc2 = 1;
} else {
nc1 = (int)sqrt((double)ncols);
nc2 = 1 + (ncols-1)/nc1;
}
}
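// Worked example for setindsD: grid y/z dimensions are limited to 65535, so for ncols = 100000
// this gives nc1 = 316 and nc2 = 317 (316 * 317 = 100172 >= 100000); the kernels then rebuild the
// column index from the two block indices, e.g. blockIdx.y + blockIdx.z * gridDim.y in __maxming.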
template<class T>
int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
__cumsumg<T><<<grid,tblock>>>(in, out, jc, nrows, ncols, m);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int cumsumgf(double *in, double *out, int *jc, int nrows, int ncols, int m) {
return cumsumg<double>(in, out, jc, nrows, ncols, m);
}
template<class T>
int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(min(64, m), nc1, nc2);
int ny = min(32, 1+nrows/m/32);
dim3 tblock(32, ny, 1);
__maxming<T><<<grid,tblock>>>(in, out, outi, jc, nrows, ncols, m, minv, dir);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
// JFC: problem here when ncols is a non-multiple of 16 and nrows < 32.
template<class T>
int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {
int nc1, nc2;
setindsD(ncols, nc1, nc2);
dim3 grid(nc1, nc2, 1);
int ny = min(32, 1+nrows/32);
dim3 tblock(32, ny, 1);
__maxmini_cols<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, minv, dir);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
template<class T>
int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {
int nb = min(32,1+nrows/32);
dim3 grid(nb,1,1);
int ny = min(32, 1+nrows/nb/32);
dim3 tblock(32, ny, 1);
__maxmini_rows<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, dir);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int maxgf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<double>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1);
}
int mingf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) {
return maxming<double>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0);
}
int maxif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<double>(in, out, outi, nrows, ncols, -3e38f, 1);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 1);
} else {
return -1;
}
}
int minif(double *in, double *out, int *outi, int nrows, int ncols, int dir) {
if (dir == 1) {
return maxmini_cols<double>(in, out, outi, nrows, ncols, 3e38f, 0);
} else if (dir == 2) {
return maxmini_rows<double>(in, out, outi, nrows, ncols, 0);
} else {
return -1;
}
}
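// Note on the maxif/minif wrappers above: dir == 1 reduces over the rows of each column
// (one result per column via __maxmini_cols), dir == 2 reduces across the columns of each row
// (one result per row via __maxmini_rows), and any other dir returns -1. The +/-3e38 values are
// float-range sentinels used to initialise the running max/min.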
__global__ void __dmv(double *a, int nrows, int ncols, double *b, double *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
double accum = 0.0;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
double accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
double tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
double accum = 0.0;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
__global__ void __dmv0(double *a, int nrows, int ncols, int tstep, double *b, double *c) {
double accum = 0.0f;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(double *a, int nrows, int ncols, double *b, double *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
__dmvt<<<griddims,blockdims>>>(a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
__dmv0<<<nbx,ntx>>>(a, nrows, ncols, tstep, b, c);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
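// A minimal usage sketch for dmv(), assuming dA, db and dc are hypothetical device pointers.
// Both code paths accumulate into c with atomicAdd, so c must be zeroed beforehand:
//
//   cudaMemset(dc, 0, nrows * sizeof(double));
//   int err = dmv(dA, nrows, ncols, db, dc, 0);   // c = A * b; trans == 1 computes c = A^T * b instead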
#define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \
__global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \
int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \
int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \
istart = (istart / 32) * 32; \
if (blockIdx.x != gridDim.x - 1) { \
iend = (iend / 32) * 32; \
} \
for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \
atomicAdd(&S[II + nrows * IJ], IV); \
} \
} \
int accum(TI, TJ, TV, TS, int m, int nrows) { \
int nthreads = max(32, min(512, m)); \
int nblocks = max(1, min(65535, m/nthreads/8)); \
__accum<<<nblocks,nthreads>>>(I,J,V,S,m,nrows); \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
}
ACCUM_KERNEL(int*I, int*J, double*V, double*S, I[i], J[i], V[i])
ACCUM_KERNEL(int*I, int J, double*V, double*S, I[i], J, V[i])
ACCUM_KERNEL(int I, int*J, double*V, double*S, I, J[i], V[i])
ACCUM_KERNEL(int*I, int*J, double V, double*S, I[i], J[i], V)
ACCUM_KERNEL(int*I, int J, double V, double*S, I[i], J, V)
ACCUM_KERNEL(int I, int*J, double V, double*S, I, J[i], V)
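// Each ACCUM_KERNEL instantiation above generates an __accum kernel and an accum() host wrapper for
// one combination of scalar/array row index, column index and value. The fully indexed form performs
// S[I[i] + nrows * J[i]] += V[i] for i in [0, m), i.e. a scatter-add of m (row, column, value)
// triples into the column-major matrix S, with atomicAdd handling duplicate coordinates.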
const int INBLOCK = 4;
// copy and transpose columns of the input matrix into the output matrix. nrows refers to the input
// matrix (and is therefore the number of columns of the output). ncols is the length of the iptrs
// array, which will be the number of rows of the output matrix. iptrs specifies the columns of the
// input array to copy. outstride is the stride (leading dimension) of the output matrix.
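// For example (hypothetical sizes): with a 4 x 8 input, iptrs = {5, 2} and ncols = 2, the output is
// a 2 x 4 matrix (leading dimension outstride) whose row 0 is input column 5 and whose row 1 is
// input column 2.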
__global__ void __icopy_transpose(int *iptrs, double *in, double *out, int outstride, int nrows, int ncols) {
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int icopy_transpose(int *iptrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__icopy_transpose<<<griddims,blockdims>>>(iptrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;}
return 0;
}
// copy and transpose the input matrix into columns of the output matrix. nrows, ncols refer to output matrix
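// These __ocopy_transpose* kernels are the scatter counterparts of __icopy_transpose above: row j of
// the input is transposed into column optrs[j] of out, either overwriting it, adding to it with
// atomicAdd, or merging with it via an atomicMin on an int view of the value.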
__global__ void __ocopy_transpose(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb];
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_add(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_min(int *optrs, double *in, double *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ double tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb]));
}
}
__syncthreads();
}
}
}
int ocopy_transpose_add(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_add<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_add"); return err;}
return 0;
}
int ocopy_transpose(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;}
return 0;
}
int ocopy_transpose_min(int *optrs, double *in, double *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_min<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_min"); return err;}
return 0;
}
#ifdef TEST
int main(int argc, char **argv) {
int m=8, n=8, opn = 0;
double *dA, *dB, *dC, *A, *B, *C;
if (argc > 1) {
sscanf(argv[1], "%d", &opn);
if (argc > 2) {
sscanf(argv[2], "%d", &m);
if (argc > 3) {
sscanf(argv[3], "%d", &n);
}
}
}
A = (double *)malloc(m*n*sizeof(double));
B = (double *)malloc(m*n*sizeof(double));
C = (double *)malloc(m*n*sizeof(double));
cudaMalloc((void**)&dA, m*n*sizeof(double));
cudaMalloc((void**)&dB, m*n*sizeof(double));
cudaMalloc((void**)&dC, m*n*sizeof(double));
for (int i = 0; i < m*n; i++) {
A[i] = 1.0f;
B[i] = 2.0f;
}
cudaMemcpy(dA, A, m*n*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, m*n*sizeof(double), cudaMemcpyHostToDevice);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
MatKernel(dA, m, n, dB, m, n, dC, opn);
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "CUDA error %d", err);
exit(1);
}
cudaMemcpy(C, dC, m*n*sizeof(double), cudaMemcpyDeviceToHost);
printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]);
printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]);
printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]);
if (dA != NULL) cudaFree(dA);
if (dB != NULL) cudaFree(dB);
if (dC != NULL) cudaFree(dC);
if (A != NULL) free(A);
if (B != NULL) free(B);
if (C != NULL) free(C);
}
#endif
// Cumulative sum of columns
#if __CUDA_ARCH__ >= 300
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
int i, j, k, lim;
double v, w, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
sum = 0.0f;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
w = __shfl_up(v, k);
if (threadIdx.x >= k) {
v += w;
}
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
sum = __shfl(v, blockDim.x - 1);
}
}
}
#else
__global__ void __cumsumc(int nrows, int ncols, double *A, double *B) {
__shared__ double buff[32];
int i, j, k, lim;
double v, sum;
int icol = threadIdx.y + blockDim.y * blockIdx.x;
__syncthreads();
for (i = icol; i < ncols; i += blockDim.y * gridDim.x) {
sum = 0.0f;
for (j = 0; j < nrows; j += blockDim.x) {
v = 0;
if (j + threadIdx.x < nrows) {
v = A[j + threadIdx.x + i * nrows];
}
__syncthreads();
buff[threadIdx.x] = v;
lim = min(blockDim.x, nrows - j);
#pragma unroll
for (k = 1; k < lim; k = k + k) {
__syncthreads();
if (threadIdx.x >= k) {
v += buff[threadIdx.x - k];
}
__syncthreads();
buff[threadIdx.x] = v;
}
v += sum;
if (j + threadIdx.x < nrows) {
B[j + threadIdx.x + i * nrows] = v;
}
__syncthreads();
sum = buff[31];
__syncthreads();
}
}
}
#endif
int cumsumc(int nrows, int ncols, double *A, double *B) {
if (ncols == 1) {
thrust::device_ptr<double> pa(A);
thrust::device_ptr<double> pb(B);
thrust::inclusive_scan(pa, pa + nrows, pb);
} else {
dim3 threads;
threads.x = 32;
threads.y = min(32, ncols);
int nblocks = min(64, 1 + (ncols-1)/threads.y);
__cumsumc<<<nblocks,threads>>>(nrows, ncols, A, B);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
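// A minimal usage sketch for cumsumc(), assuming dA and dB are hypothetical device pointers to a
// column-major nrows x ncols matrix: cumsumc(nrows, ncols, dA, dB) writes running column sums, so a
// column (1, 2, 3) becomes (1, 3, 6); a single column is delegated to thrust::inclusive_scan.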
|
4208fb51470eccb82f5554c6c6adbcde8e138488.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void checkInex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y,
blockDim.z, gridDim.x, gridDim.y, gridDim.z);
return;
}
int main(int argc, char *argv[]) {
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
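// with nElem = 6 and block.x = 3, grid.x = (6 + 3 - 1) / 3 = 2,
// so the kernel below runs as 2 blocks of 3 threads and prints 6 lines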
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
hipLaunchKernelGGL(( checkInex), dim3(grid), dim3(block), 0, 0, );
// reset device before you leave
hipDeviceReset();
return 0;
}
| 4208fb51470eccb82f5554c6c6adbcde8e138488.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void checkInex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y,
blockDim.z, gridDim.x, gridDim.y, gridDim.z);
return;
}
int main(int argc, char *argv[]) {
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
checkInex<<<grid, block>>>();
// reset device before you leave
cudaDeviceReset();
return 0;
}
|
e0ab60ab53d9a18addac502e2c243479256581ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lodepng.h"
#include <stdio.h>
#include <stdlib.h>
/********************************************************************************
This CUDA program demonstrates how to process an image using CUDA. The program
takes an image, performs Gaussian blur filtering, and then saves the blurred
image.
Compile with:
nvcc task3_partB.cu -o task3_partB lodepng.cpp
To run:
./task3_partB "input filename"
Author: Sasmita Gurung
University Email: [email protected]
**********************************************************************************/
__device__ unsigned int deviceWidth;
//Getting Red pixels
__device__ unsigned char getRed(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4);
return image[i];
}
//Getting Green pixels
__device__ unsigned char getGreen(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 1;
return image[i];
}
//Getting Blue pixels
__device__ unsigned char getBlue(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 2;
return image[i];
}
//Getting Alpha pixels (transparency)
__device__ unsigned char getAlpha(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 3;
return image[i];
}
//Setting Red value
__device__ void setRed(unsigned char *image, unsigned int row, unsigned int column, unsigned char red)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4);
image[i] = red;
}
//Setting Green value
__device__ void setGreen(unsigned char *image, unsigned int row, unsigned int column, unsigned char green)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 1;
image[i] = green;
}
//Setting Blue value
__device__ void setBlue(unsigned char *image, unsigned int row, unsigned int column, unsigned char blue)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 2;
image[i] = blue;
}
//Setting Alpha value
__device__ void setAlpha(unsigned char *image, unsigned int row, unsigned int column, unsigned char alpha)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 3;
image[i] = alpha;
}
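//Pixel layout used by the getters/setters above: the decoded image is a flat RGBA byte array, so
//pixel (row, column) starts at index (row * width + column) * 4, with offsets +0 red, +1 green,
//+2 blue and +3 alpha. For example, with width = 100, pixel (2, 5) occupies bytes 820..823.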
__global__ void applyGaussianBlurr(unsigned char* image, unsigned char* newImage, unsigned int *width){
int row = blockIdx.x+1;
int column = threadIdx.x+1;
deviceWidth = *width;
unsigned redTL, redTC, redTR;
unsigned redL, redC, redR;
unsigned redBL, redBC, redBR;
unsigned newRed;
unsigned greenTL, greenTC, greenTR;
unsigned greenL, greenC, greenR;
unsigned greenBL, greenBC, greenBR;
unsigned newGreen;
unsigned blueTL, blueTC, blueTR;
unsigned blueL, blueC, blueR;
unsigned blueBL, blueBC, blueBR;
unsigned newBlue;
setGreen(newImage, row, column, getGreen(image, row, column));
setBlue(newImage, row, column, getBlue(image, row, column));
setAlpha(newImage, row, column, 255);
redTL = getRed(image, row - 1, column - 1);
redTC = getRed(image, row - 1, column);
redTR = getRed(image, row - 1, column + 1);
redL = getRed(image, row, column - 1);
redC = getRed(image, row, column);
redR = getRed(image, row, column + 1);
redBL = getRed(image, row + 1, column - 1);
redBC = getRed(image, row + 1, column);
redBR = getRed(image, row + 1, column + 1);
newRed = (redTL+redTC+redTR+redL+redC+redR+redBL+redBC+redBR)/9; //Blurring red color value
setRed(newImage, row, column, newRed);
greenTL = getGreen(image, row - 1, column - 1);
greenTC = getGreen(image, row - 1, column);
greenTR = getGreen(image, row - 1, column + 1);
greenL = getGreen(image, row, column - 1);
greenC = getGreen(image, row, column);
greenR = getGreen(image, row, column + 1);
greenBL = getGreen(image, row + 1, column - 1);
greenBC = getGreen(image, row + 1, column);
greenBR = getGreen(image, row + 1, column + 1);
newGreen = (greenTL+greenTC+greenTR+greenL+greenC+greenR+greenBL+greenBC+greenBR)/9; //Blurring green color value
setGreen(newImage, row, column, newGreen);
blueTL = getBlue(image, row - 1, column - 1);
blueTC = getBlue(image, row - 1, column);
blueTR = getBlue(image, row - 1, column + 1);
blueL = getBlue(image, row, column - 1);
blueC = getBlue(image, row, column);
blueR = getBlue(image, row, column + 1);
blueBL = getBlue(image, row + 1, column - 1);
blueBC = getBlue(image, row + 1, column);
blueBR = getBlue(image, row + 1, column + 1);
newBlue = (blueTL+blueTC+blueTR+blueL+blueC+blueR+blueBL+blueBC+blueBR)/9; //Blurring blue color value
setBlue(newImage, row, column, newBlue);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv)
{
unsigned char *image;
const char *filename = argv[1];
const char *newFileName = "blurred_image.png";
unsigned char *newImage;
unsigned int height = 0, width = 0;
//Decoding Image
lodepng_decode32_file(&image, &width, &height, filename);
newImage = (unsigned char *)malloc(height * width * 4 * sizeof(unsigned char));
//Declaring gpuImage and setting the value
unsigned char * gpuImage;
hipMalloc( (void**) &gpuImage, sizeof(char) * height*width*4);
hipMemcpy(gpuImage, image, sizeof(char) * height*width*4, hipMemcpyHostToDevice);
//Declaring gpuNewImage
unsigned char * gpuNewImage;
hipMalloc( (void**) &gpuNewImage, sizeof(char) * height*width*4);
//Declaring gpuImageWidth and setting the value
unsigned int* gpuWidth;
hipMalloc( (void**) &gpuWidth, sizeof(int));
hipMemcpy(gpuWidth, &width, sizeof(int), hipMemcpyHostToDevice);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( applyGaussianBlurr), dim3(height-1),dim3(width-1), 0, 0, gpuImage, gpuNewImage, gpuWidth);
hipDeviceSynchronize();
printf("Image width = %d, height = %d\n", width, height);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
//Getting newImage data from gpu
hipMemcpy(newImage, gpuNewImage, sizeof(char) * height * width * 4, hipMemcpyDeviceToHost);
//Encoding image
lodepng_encode32_file(newFileName, newImage, width, height);
return 0;
}
| e0ab60ab53d9a18addac502e2c243479256581ae.cu | #include "lodepng.h"
#include <stdio.h>
#include <stdlib.h>
/********************************************************************************
This CUDA program demonstrates how to process an image using CUDA. The program
takes an image, performs Gaussian blur filtering, and then saves the blurred
image.
Compile with:
nvcc task3_partB.cu -o task3_partB lodepng.cpp
To run:
./task3_partB "input filename"
Author: Sasmita Gurung
University Email: [email protected]
**********************************************************************************/
__device__ unsigned int deviceWidth;
//Getting Red pixels
__device__ unsigned char getRed(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4);
return image[i];
}
//Getting Green pixels
__device__ unsigned char getGreen(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 1;
return image[i];
}
//Getting Blue pixels
__device__ unsigned char getBlue(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 2;
return image[i];
}
//Getting Alpha pixels (transparency)
__device__ unsigned char getAlpha(unsigned char *image, unsigned int row, unsigned int column)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 3;
return image[i];
}
//Setting Red value
__device__ void setRed(unsigned char *image, unsigned int row, unsigned int column, unsigned char red)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4);
image[i] = red;
}
//Setting Green value
__device__ void setGreen(unsigned char *image, unsigned int row, unsigned int column, unsigned char green)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 1;
image[i] = green;
}
//Setting Blue value
__device__ void setBlue(unsigned char *image, unsigned int row, unsigned int column, unsigned char blue)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 2;
image[i] = blue;
}
//Setting Alpha value
__device__ void setAlpha(unsigned char *image, unsigned int row, unsigned int column, unsigned char alpha)
{
unsigned int i = (row * deviceWidth * 4) + (column * 4) + 3;
image[i] = alpha;
}
__global__ void applyGaussianBlurr(unsigned char* image, unsigned char* newImage, unsigned int *width){
int row = blockIdx.x+1;
int column = threadIdx.x+1;
deviceWidth = *width;
unsigned redTL, redTC, redTR;
unsigned redL, redC, redR;
unsigned redBL, redBC, redBR;
unsigned newRed;
unsigned greenTL, greenTC, greenTR;
unsigned greenL, greenC, greenR;
unsigned greenBL, greenBC, greenBR;
unsigned newGreen;
unsigned blueTL, blueTC, blueTR;
unsigned blueL, blueC, blueR;
unsigned blueBL, blueBC, blueBR;
unsigned newBlue;
setGreen(newImage, row, column, getGreen(image, row, column));
setBlue(newImage, row, column, getBlue(image, row, column));
setAlpha(newImage, row, column, 255);
redTL = getRed(image, row - 1, column - 1);
redTC = getRed(image, row - 1, column);
redTR = getRed(image, row - 1, column + 1);
redL = getRed(image, row, column - 1);
redC = getRed(image, row, column);
redR = getRed(image, row, column + 1);
redBL = getRed(image, row + 1, column - 1);
redBC = getRed(image, row + 1, column);
redBR = getRed(image, row + 1, column + 1);
newRed = (redTL+redTC+redTR+redL+redC+redR+redBL+redBC+redBR)/9; //Blurring red color value
setRed(newImage, row, column, newRed);
greenTL = getGreen(image, row - 1, column - 1);
greenTC = getGreen(image, row - 1, column);
greenTR = getGreen(image, row - 1, column + 1);
greenL = getGreen(image, row, column - 1);
greenC = getGreen(image, row, column);
greenR = getGreen(image, row, column + 1);
greenBL = getGreen(image, row + 1, column - 1);
greenBC = getGreen(image, row + 1, column);
greenBR = getGreen(image, row + 1, column + 1);
newGreen = (greenTL+greenTC+greenTR+greenL+greenC+greenR+greenBL+greenBC+greenBR)/9; //Blurring green color value
setGreen(newImage, row, column, newGreen);
blueTL = getBlue(image, row - 1, column - 1);
blueTC = getBlue(image, row - 1, column);
blueTR = getBlue(image, row - 1, column + 1);
blueL = getBlue(image, row, column - 1);
blueC = getBlue(image, row, column);
blueR = getBlue(image, row, column + 1);
blueBL = getBlue(image, row + 1, column - 1);
blueBC = getBlue(image, row + 1, column);
blueBR = getBlue(image, row + 1, column + 1);
newBlue = (blueTL+blueTC+blueTR+blueL+blueC+blueR+blueBL+blueBC+blueBR)/9; //Blurring blue color value
setBlue(newImage, row, column, newBlue);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv)
{
unsigned char *image;
const char *filename = argv[1];
const char *newFileName = "blurred_image.png";
unsigned char *newImage;
unsigned int height = 0, width = 0;
//Decoding Image
lodepng_decode32_file(&image, &width, &height, filename);
newImage = (unsigned char *)malloc(height * width * 4 * sizeof(unsigned char));
//Declaring gpuImage and setting the value
unsigned char * gpuImage;
cudaMalloc( (void**) &gpuImage, sizeof(char) * height*width*4);
cudaMemcpy(gpuImage, image, sizeof(char) * height*width*4, cudaMemcpyHostToDevice);
//Declaring gpuNewImage
unsigned char * gpuNewImage;
cudaMalloc( (void**) &gpuNewImage, sizeof(char) * height*width*4);
//Declaring gpuImageWidth and setting the value
unsigned int* gpuWidth;
cudaMalloc( (void**) &gpuWidth, sizeof(int));
cudaMemcpy(gpuWidth, &width, sizeof(int), cudaMemcpyHostToDevice);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
applyGaussianBlurr<<<height-1,width-1>>>(gpuImage, gpuNewImage, gpuWidth);
cudaDeviceSynchronize();
printf("Image width = %d, height = %d\n", width, height);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
//Getting newImage data from gpu
cudaMemcpy(newImage, gpuNewImage, sizeof(char) * height * width * 4, cudaMemcpyDeviceToHost);
//Encoding image
lodepng_encode32_file(newFileName, newImage, width, height);
return 0;
}
|
7220a99e0deae18f402fec78fe9c16ab222f6ec4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS_PER_BLOCK 128
#include <random>
#include <iostream>
#include <cmath>    // for ceil() used when computing the launch grid
void print_array(float *p, int n) {
std::cout << n << " elements" << std::endl;
for (int i = 0 ; i < n ; i++) {
std::cout << *(p+i) << " ";
}
std::cout << std::endl << std::endl;
}
__global__ void convolve(int N, float *input, float *output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
float result = 0;
for (int i = 0 ; i < 3 ; i++) {
result += input[index + i];
}
output[index] = result / 3.0;
}
}
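// Each output element is the mean of the 3-element window starting at its own index, so for
// input = {1, 2, 3, 4, ...} the kernel yields output[0] = (1 + 2 + 3) / 3 = 2 and
// output[1] = (2 + 3 + 4) / 3 = 3; the N+2 input elements keep the last window in bounds.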
int main(int argc, char* argv[]) {
// define the size of the input array
int N = 1024;
// create pointers for the CPU arrays
float *input = new float[N+2];
float *output = new float[N];
// generate data randomly and store it in the CPU
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> mydist(1,10);
for (int i = 0 ; i < (N+2) ; i ++) {
input[i] = mydist(gen);
}
print_array(input, N+2);
// create pointers for the CUDA arrays
float *dev_in;
float *dev_out;
// variable to check for CUDA errors
hipError_t status;
// choose GPU to run
status = hipSetDevice(0);
if (status != hipSuccess) std::cerr << "hipSetDevice failed!" << std::endl;
// allocate space for the arrays in the GPU
status = hipMalloc(&dev_in, sizeof(float) * (N+2));
if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl;
status = hipMalloc(&dev_out, sizeof(float) * N);
if (status != hipSuccess) std::cerr << "hipMalloc (out) failed!" << std::endl;
// transfer data from CPU to GPU
status = hipMemcpy(dev_in, input, sizeof(float) * (N+2), hipMemcpyHostToDevice);
if (status != hipSuccess) std::cerr << "hipMemcpy H2D failed!" << std::endl;
// do the work in the GPU
hipLaunchKernelGGL(( convolve), dim3(::ceil((float)N/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, N, dev_in, dev_out);
// wait for the kernel to finish, and check for errors
status = hipDeviceSynchronize();
if (status != hipSuccess) std::cerr << "error code " << status << " returned after kernel!" << std::endl;
// transfer results from GPU to CPU
status = hipMemcpy(output, dev_out, sizeof(float) * N, hipMemcpyDeviceToHost);
if (status != hipSuccess) std::cerr << "hipMemcpy D2H failed!" << std::endl;
print_array(output, N);
// free the memory allocated in the CPU
delete [] input;
delete [] output;
// free the memory allocated in the GPU
hipFree(dev_in);
hipFree(dev_out);
return 0;
}
| 7220a99e0deae18f402fec78fe9c16ab222f6ec4.cu | #define THREADS_PER_BLOCK 128
#include <random>
#include <iostream>
#include <cmath>    // for std::ceil used when computing the launch grid
void print_array(float *p, int n) {
std::cout << n << " elements" << std::endl;
for (int i = 0 ; i < n ; i++) {
std::cout << *(p+i) << " ";
}
std::cout << std::endl << std::endl;
}
__global__ void convolve(int N, float *input, float *output) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
float result = 0;
for (int i = 0 ; i < 3 ; i++) {
result += input[index + i];
}
output[index] = result / 3.0;
}
}
int main(int argc, char* argv[]) {
// define the size of the input array
int N = 1024;
// create pointers for the CPU arrays
float *input = new float[N+2];
float *output = new float[N];
// generate data randomly and store it in the CPU
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> mydist(1,10);
for (int i = 0 ; i < (N+2) ; i ++) {
input[i] = mydist(gen);
}
print_array(input, N+2);
// create pointers for the CUDA arrays
float *dev_in;
float *dev_out;
// variable to check for CUDA errors
cudaError_t status;
// choose GPU to run
status = cudaSetDevice(0);
if (status != cudaSuccess) std::cerr << "cudaSetDevice failed!" << std::endl;
// allocate space for the arrays in the GPU
status = cudaMalloc(&dev_in, sizeof(float) * (N+2));
if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl;
status = cudaMalloc(&dev_out, sizeof(float) * N);
if (status != cudaSuccess) std::cerr << "cudaMalloc (out) failed!" << std::endl;
// transfer data from CPU to GPU
status = cudaMemcpy(dev_in, input, sizeof(float) * (N+2), cudaMemcpyHostToDevice);
if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed!" << std::endl;
// do the work in the GPU
convolve<<<std::ceil((float)N/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(N, dev_in, dev_out);
// wait for the kernel to finish, and check for errors
status = cudaDeviceSynchronize();
if (status != cudaSuccess) std::cerr << "error code " << status << " returned after kernel!" << std::endl;
// transfer results from GPU to CPU
status = cudaMemcpy(output, dev_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
if (status != cudaSuccess) std::cerr << "cudaMemcpy D2H failed!" << std::endl;
print_array(output, N);
// free the memory allocated in the CPU
delete [] input;
delete [] output;
// free the memory allocated in the GPU
cudaFree(dev_in);
cudaFree(dev_out);
return 0;
}
|
fde805599ea9093c6af9ad0023abfaa01c395f1f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#define WIN32
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk2l4(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub1 = 0;
float Csub2 = 0;
float Csub3 = 0;
float Csub4 = 0;
float Csub5 = 0;
float Csub6 = 0;
float Csub7 = 0;
float Csub8 = 0;
__shared__ float As1[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][4 * BLOCK_SIZE];
__shared__ float As2[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][4 * BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[4 * BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[4 * BLOCK_SIZE];
float(*tempA)[BLOCK_SIZE];
float(*tempB)[4 * BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
As1[ty + BLOCK_SIZE][tx] = A[aBegin + wA * (ty + wB / 2) + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
Bs1[ty][tx + BLOCK_SIZE] = B[bBegin + wA * (ty)+tx + wB / 4];
Bs1[ty][tx + 2*BLOCK_SIZE] = B[bBegin + wA * (ty)+tx + 2*wB / 4];
Bs1[ty][tx + 3*BLOCK_SIZE] = B[bBegin + wA * (ty)+tx + 3*wB / 4];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
tempA = currentAs;
currentAs = nextAs;
nextAs = tempA;
tempB = currentBs;
currentBs = nextBs;
nextBs = tempB;
__syncthreads();   // ensure the tiles loaded by other threads are visible before they are read
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub1 += currentAs[ty][k] * currentBs[k][tx];
Csub2 += currentAs[ty][k] * currentBs[k][tx+BLOCK_SIZE];
Csub3 += currentAs[ty][k] * currentBs[k][tx + 2* BLOCK_SIZE];
Csub4 += currentAs[ty][k] * currentBs[k][tx + 3* BLOCK_SIZE];
Csub5 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx];
Csub6 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx+ BLOCK_SIZE];
Csub7 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx + 2* BLOCK_SIZE];
Csub8 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx + 3* BLOCK_SIZE];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextAs[ty + BLOCK_SIZE][tx] = A[a + aStep + wA * (ty + wB / 2) + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
nextBs[ty][tx + BLOCK_SIZE] = B[b + bStep + wB * ty + tx + wB / 4];
nextBs[ty][tx + 2*BLOCK_SIZE] = B[b + bStep + wB * ty + tx + 2*wB / 4];
nextBs[ty][tx + 3*BLOCK_SIZE] = B[b + bStep + wB * ty + tx + 3*wB / 4];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub1;
C[c + wB * ty + tx + wB/4] = Csub2;
C[c + wB * ty + tx + 2*wB / 4] = Csub3;
C[c + wB * ty + tx + 3*wB / 4] = Csub4;
C[c + wB * (ty + wB / 2) + tx] = Csub5;
C[c + wB * (ty + wB / 2) + tx + wB / 4] = Csub6;
C[c + wB * (ty + wB / 2) + tx + 2*wB / 4] = Csub7;
C[c + wB * (ty + wB / 2) + tx + 3*wB / 4] = Csub8;
}
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk2l2(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub1 = 0;
float Csub2 = 0;
float Csub3 = 0;
float Csub4 = 0;
__shared__ float As1[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][2*BLOCK_SIZE];
__shared__ float As2[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][2*BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[2*BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[2*BLOCK_SIZE];
float(*tempA)[BLOCK_SIZE];
float(*tempB)[2*BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
As1[ty + BLOCK_SIZE][tx] = A[aBegin + wA * (ty + wB / 2) + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
Bs1[ty][tx + BLOCK_SIZE] = B[bBegin + wA * (ty) + tx + wB / 2];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
tempA = currentAs;
currentAs = nextAs;
nextAs = tempA;
tempB = currentBs;
currentBs = nextBs;
nextBs = tempB;
__syncthreads();   // ensure the tiles loaded by other threads are visible before they are read
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub1 += currentAs[ty][k] * currentBs[k][tx];
Csub2 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx];
Csub3 += currentAs[ty][k] * currentBs[k][tx+BLOCK_SIZE];
Csub4 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx + BLOCK_SIZE];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextAs[ty + BLOCK_SIZE][tx] = A[a + aStep + wA * (ty + wB / 2) + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
nextBs[ty][tx + BLOCK_SIZE] = B[b + bStep + wB * ty + tx + wB / 2];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub1;
C[c + wB * (ty + wB / 2) + tx] = Csub2;
C[c + wB * ty + tx + wB/2] = Csub3;
C[c + wB * (ty + wB / 2) + tx + wB / 2] = Csub4;
}
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk2l1(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub1 = 0;
float Csub2 = 0;
__shared__ float As1[2*BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float As2[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[BLOCK_SIZE];
float(*temp)[BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
As1[ty+ BLOCK_SIZE][tx] = A[aBegin + wA * (ty+ wB / 2) + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
temp = currentAs;
currentAs = nextAs;
nextAs = temp;
temp = currentBs;
currentBs = nextBs;
nextBs = temp;
__syncthreads();   // ensure the tiles loaded by other threads are visible before they are read
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub1 += currentAs[ty][k] * currentBs[k][tx];
Csub2 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextAs[ty + BLOCK_SIZE][tx] = A[a + aStep + wA * (ty + wB / 2) + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub1;
C[c + wB * (ty + wB/2) + tx] = Csub2;
}
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk1l1(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
__shared__ float As1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float As2[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[BLOCK_SIZE];
float(*temp)[BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
temp = currentAs;
currentAs = nextAs;
nextAs = temp;
temp = currentBs;
currentBs = nextBs;
nextBs = temp;
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += currentAs[ty][k] * currentBs[k][tx];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
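// All four kernels above share the same double-buffered tiling scheme: two pairs of shared-memory
// tiles (As1/Bs1 and As2/Bs2) are swapped by pointer each iteration, so the tile used in the
// unrolled multiply loop and the tile being loaded for the next iteration live in different
// buffers. The k2l1/k2l2/k2l4 variants additionally register-block the output, computing 2, 4 or 8
// elements of C per thread, which is why the alternative (commented-out) grids in matrixMultiply()
// below are smaller by the same factors.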
void constantInit(float* data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int block_size, dim3& dimsA, dim3& dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float* h_C = (float*)malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void**)&d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y );
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 10;
for (int j = 0; j < nIter; j++)
{
if (block_size == 8) {
matrixMulCUDAk1l1<8> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDAk1l1<16>) , dim3(grid), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y/2);
//matrixMulCUDAk2l1<16> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x/2, dimsA.y / threads.y/2);
//matrixMulCUDAk2l2<16> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x / 4, dimsA.y / threads.y / 2);
//matrixMulCUDAk2l4<16> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDAk1l1<32>) , dim3(grid), dim3(threads), 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y/2);
//matrixMulCUDAk2l1<32> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x/2, dimsA.y / threads.y/2);
//matrixMulCUDAk2l2<32> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x / 4, dimsA.y / threads.y / 2);
//matrixMulCUDAk2l4<32> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
hipError_t err = hipGetLastError(); if (err != hipSuccess) {
printf("CUDA Error: %s\n", hipGetErrorString(err));
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
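// The operation count 2 * dimsA.x * dimsA.y * dimsB.x reflects one multiply and one add per
// inner-product term for each of the dimsA.y * dimsB.x output elements.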
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-3)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x * valB);
correct = false;
}
}
printf("%s\n", correct ? "OK" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
int cpuMatrixMul() {
return 0;
}
/**
* Program main
*/
int main(int argc, char** argv)
{
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
hipError_t error;
hipDeviceProp_t deviceProp;
hipSetDevice(devID);
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
int block_size = 16;
dim3 dimsA(1024, 1024, 1);
dim3 dimsB(1024, 1024, 1);
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(block_size, dimsA, dimsB);
exit(matrix_result);
}
| fde805599ea9093c6af9ad0023abfaa01c395f1f.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#define WIN32
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk2l4(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub1 = 0;
float Csub2 = 0;
float Csub3 = 0;
float Csub4 = 0;
float Csub5 = 0;
float Csub6 = 0;
float Csub7 = 0;
float Csub8 = 0;
__shared__ float As1[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][4 * BLOCK_SIZE];
__shared__ float As2[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][4 * BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[4 * BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[4 * BLOCK_SIZE];
float(*tempA)[BLOCK_SIZE];
float(*tempB)[4 * BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
As1[ty + BLOCK_SIZE][tx] = A[aBegin + wA * (ty + wB / 2) + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
Bs1[ty][tx + BLOCK_SIZE] = B[bBegin + wA * (ty)+tx + wB / 4];
Bs1[ty][tx + 2*BLOCK_SIZE] = B[bBegin + wA * (ty)+tx + 2*wB / 4];
Bs1[ty][tx + 3*BLOCK_SIZE] = B[bBegin + wA * (ty)+tx + 3*wB / 4];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
tempA = currentAs;
currentAs = nextAs;
nextAs = tempA;
tempB = currentBs;
currentBs = nextBs;
nextBs = tempB;
__syncthreads();   // ensure the tiles loaded by other threads are visible before they are read
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub1 += currentAs[ty][k] * currentBs[k][tx];
Csub2 += currentAs[ty][k] * currentBs[k][tx+BLOCK_SIZE];
Csub3 += currentAs[ty][k] * currentBs[k][tx + 2* BLOCK_SIZE];
Csub4 += currentAs[ty][k] * currentBs[k][tx + 3* BLOCK_SIZE];
Csub5 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx];
Csub6 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx+ BLOCK_SIZE];
Csub7 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx + 2* BLOCK_SIZE];
Csub8 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx + 3* BLOCK_SIZE];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextAs[ty + BLOCK_SIZE][tx] = A[a + aStep + wA * (ty + wB / 2) + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
nextBs[ty][tx + BLOCK_SIZE] = B[b + bStep + wB * ty + tx + wB / 4];
nextBs[ty][tx + 2*BLOCK_SIZE] = B[b + bStep + wB * ty + tx + 2*wB / 4];
nextBs[ty][tx + 3*BLOCK_SIZE] = B[b + bStep + wB * ty + tx + 3*wB / 4];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub1;
C[c + wB * ty + tx + wB/4] = Csub2;
C[c + wB * ty + tx + 2*wB / 4] = Csub3;
C[c + wB * ty + tx + 3*wB / 4] = Csub4;
C[c + wB * (ty + wB / 2) + tx] = Csub5;
C[c + wB * (ty + wB / 2) + tx + wB / 4] = Csub6;
C[c + wB * (ty + wB / 2) + tx + 2*wB / 4] = Csub7;
C[c + wB * (ty + wB / 2) + tx + 3*wB / 4] = Csub8;
}
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk2l2(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub1 = 0;
float Csub2 = 0;
float Csub3 = 0;
float Csub4 = 0;
__shared__ float As1[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][2*BLOCK_SIZE];
__shared__ float As2[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][2*BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[2*BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[2*BLOCK_SIZE];
float(*tempA)[BLOCK_SIZE];
float(*tempB)[2*BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
As1[ty + BLOCK_SIZE][tx] = A[aBegin + wA * (ty + wB / 2) + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
Bs1[ty][tx + BLOCK_SIZE] = B[bBegin + wA * (ty) + tx + wB / 2];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
tempA = currentAs;
currentAs = nextAs;
nextAs = tempA;
tempB = currentBs;
currentBs = nextBs;
nextBs = tempB;
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub1 += currentAs[ty][k] * currentBs[k][tx];
Csub2 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx];
Csub3 += currentAs[ty][k] * currentBs[k][tx+BLOCK_SIZE];
Csub4 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx + BLOCK_SIZE];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextAs[ty + BLOCK_SIZE][tx] = A[a + aStep + wA * (ty + wB / 2) + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
nextBs[ty][tx + BLOCK_SIZE] = B[b + bStep + wB * ty + tx + wB / 2];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub1;
C[c + wB * (ty + wB / 2) + tx] = Csub2;
C[c + wB * ty + tx + wB/2] = Csub3;
C[c + wB * (ty + wB / 2) + tx + wB / 2] = Csub4;
}
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk2l1(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub1 = 0;
float Csub2 = 0;
__shared__ float As1[2*BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float As2[2 * BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[BLOCK_SIZE];
float(*temp)[BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
As1[ty+ BLOCK_SIZE][tx] = A[aBegin + wA * (ty+ wB / 2) + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
temp = currentAs;
currentAs = nextAs;
nextAs = temp;
temp = currentBs;
currentBs = nextBs;
nextBs = temp;
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub1 += currentAs[ty][k] * currentBs[k][tx];
Csub2 += currentAs[ty + BLOCK_SIZE][k] * currentBs[k][tx];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextAs[ty + BLOCK_SIZE][tx] = A[a + aStep + wA * (ty + wB / 2) + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub1;
C[c + wB * (ty + wB/2) + tx] = Csub2;
}
template <int BLOCK_SIZE> __global__ void
matrixMulCUDAk1l1(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
__shared__ float As1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float As2[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs2[BLOCK_SIZE][BLOCK_SIZE];
float(*currentAs)[BLOCK_SIZE];
float(*currentBs)[BLOCK_SIZE];
float(*nextAs)[BLOCK_SIZE];
float(*nextBs)[BLOCK_SIZE];
float(*temp)[BLOCK_SIZE];
As1[ty][tx] = A[aBegin + wA * ty + tx];
Bs1[ty][tx] = B[bBegin + wB * ty + tx];
currentAs = As2;
currentBs = Bs2;
nextAs = As1;
nextBs = Bs1;
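    // Ping-pong between the two shared-memory buffer pairs: the tile loaded before the
    // loop (and, later, the tile prefetched at the end of each iteration) is swapped in
    // as the "current" tile, freeing the other pair for the next prefetch.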
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
temp = currentAs;
currentAs = nextAs;
nextAs = temp;
temp = currentBs;
currentBs = nextBs;
nextBs = temp;
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += currentAs[ty][k] * currentBs[k][tx];
}
__syncthreads();
nextAs[ty][tx] = A[a + aStep + wA * ty + tx];
nextBs[ty][tx] = B[b + bStep + wB * ty + tx];
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float* data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int block_size, dim3& dimsA, dim3& dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
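    // With A filled with 1.0 and B filled with valB, every element of C = A * B should
    // equal dimsA.x * valB; the correctness check after the kernel launches relies on this.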
// Allocate device memory
float* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float* h_C = (float*)malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void**)&d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y );
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 10;
for (int j = 0; j < nIter; j++)
{
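        // block_size selects the template instantiation. The commented-out alternatives
        // below launch the register-blocked variants; note that they need a proportionally
        // smaller grid, since each block then covers a 2x (k2l1), 2x2 (k2l2) or 2x4 (k2l4)
        // larger output tile.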
if (block_size == 8) {
matrixMulCUDAk1l1<8> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else if (block_size == 16)
{
matrixMulCUDAk1l1<16> <<<grid, threads>>> (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y/2);
//matrixMulCUDAk2l1<16> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x/2, dimsA.y / threads.y/2);
//matrixMulCUDAk2l2<16> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x / 4, dimsA.y / threads.y / 2);
//matrixMulCUDAk2l4<16> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDAk1l1<32> <<<grid, threads>>> (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y/2);
//matrixMulCUDAk2l1<32> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x/2, dimsA.y / threads.y/2);
//matrixMulCUDAk2l2<32> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
//dim3 grid(dimsB.x / threads.x / 4, dimsA.y / threads.y / 2);
//matrixMulCUDAk2l4<32> << <grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) {
printf("CUDA Error: %s\n", cudaGetErrorString(err));
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
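    // 2 * M * N * K floating-point operations in total: one multiply and one add per
    // term of each of the M*N inner products of length K (M = dimsA.y, N = dimsB.x,
    // K = dimsA.x).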
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-3)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x * valB);
correct = false;
}
}
printf("%s\n", correct ? "OK" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
int cpuMatrixMul() {
return 0;
}
/**
* Program main
*/
int main(int argc, char** argv)
{
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
cudaError_t error;
cudaDeviceProp deviceProp;
cudaSetDevice(devID);
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
int block_size = 16;
dim3 dimsA(1024, 1024, 1);
dim3 dimsB(1024, 1024, 1);
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(block_size, dimsA, dimsB);
exit(matrix_result);
}
|
cf7d5041ec60c3bceacf2eba24fe2cf841b515a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/psroi_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void PSROIPoolingForward(
const int nthreads,
const Dtype* bottom_data,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois,
const int output_dim,
const int group_size,
Dtype* top_data,
int* mapping_channel) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) *
spatial_scale;
Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) *
spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) *
spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) *
spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
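    // Position-sensitive pooling: output bin (gh, gw) of output channel ctop reads from
    // its own dedicated score map, so the input channel c is selected from the ctop-th
    // group of group_size x group_size maps.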
int c = (ctop * group_size + gh) * group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
Dtype out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
out_sum += bottom_data[bottom_index];
}
}
Dtype bin_area = (hend - hstart) * (wend - wstart);
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set(count, Dtype(0), top_data);
caffe_gpu_set(count, -1, mapping_channel_ptr);
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, output_dim_,
group_size_, top_data, mapping_channel_ptr);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PSROIPoolingBackwardAtomic(
const int nthreads, const Dtype* top_diff,
const int* mapping_channel, const int num_rois,
const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) *
spatial_scale;
Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) *
spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) *
spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) *
spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
Dtype* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
Dtype bin_area = (hend - hstart) * (wend - wstart);
Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
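    // Scatter the bin-averaged gradient back to every input cell of the bin. Atomic adds
    // are needed because bins from different, possibly overlapping ROIs can map to the
    // same bottom_diff elements.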
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0.), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
const int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_,
channels_, height_, width_,
pooled_height_, pooled_width_, output_dim_,
bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PSROIPoolingLayer);
} // namespace caffe
| cf7d5041ec60c3bceacf2eba24fe2cf841b515a6.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/psroi_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void PSROIPoolingForward(
const int nthreads,
const Dtype* bottom_data,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois,
const int output_dim,
const int group_size,
Dtype* top_data,
int* mapping_channel) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) *
spatial_scale;
Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) *
spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) *
spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) *
spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
Dtype out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
out_sum += bottom_data[bottom_index];
}
}
Dtype bin_area = (hend - hstart) * (wend - wstart);
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set(count, Dtype(0), top_data);
caffe_gpu_set(count, -1, mapping_channel_ptr);
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, output_dim_,
group_size_, top_data, mapping_channel_ptr);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PSROIPoolingBackwardAtomic(
const int nthreads, const Dtype* top_diff,
const int* mapping_channel, const int num_rois,
const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) *
spatial_scale;
Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) *
spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) *
spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) *
spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
Dtype* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
Dtype bin_area = (hend - hstart) * (wend - wstart);
Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0.), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
const int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_,
channels_, height_, width_,
pooled_height_, pooled_width_, output_dim_,
bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PSROIPoolingLayer);
} // namespace caffe
|
0f043e56f945029a21c4d44da49159deec30221c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../include/data.h"
#include "../include/RPCFKernels.cuh"
#include "../include/transform.cuh"
#include "../include/operation.h"
#include "../include/cuRPCF.h"
#include <cstdlib>
#include <math.h>
#include <cassert>
#include <iostream>
#include "../include/rhs.cuh"
#include "../include/transform_multi_gpu.h"
#include "../include/velocity.h"
__host__ void getDeviceInfo(problem& pb) {
hipDeviceProp_t prop;
int dev_num;
int n_dev;
size_t free;
size_t total;
launch_subthread(pb);
hipGetDevice(&dev_num);
hipGetDeviceProperties(&prop, dev_num);
hipMemGetInfo(&free, &total);
hipGetDeviceCount(&n_dev);
//err = hipDeviceReset();
//ASSERT(err == hipSuccess);
printf("Using CUDA device %u. Device ID: %s on PCI-E %d\n",
dev_num, prop.name, prop.pciBusID);
printf("GPU total memory = % .2f MB\n", (float)total / (1.024e6));
printf("GPU free memory = % .2f MB\n", (float)free / (1.024e6));
printf("Total device number = :%d\n\n", n_dev);
//for (int i = 0; i < NUM_GPU; i++) {
// dev_id[i] = i%n_dev;
// assert(dev_id[0] == dev_num);
//}
//for (int i = 0; i < n_dev; i++) {
// hipDeviceEnablePeerAccess(i, 0);
//}
//int accessibleTest;
//hipDeviceCanAccessPeer(&accessibleTest, dev_id[0], dev_id[1]);
//if (accessibleTest != 1) { std::cerr << "peer access not supported" << std::endl; };
}
__host__ int allocDeviceMem(problem& pb) {
hipError_t err;
pb.extent = make_hipExtent(
2*(pb.mx/2+1) * sizeof(REAL), pb.my, pb.mz);
pb.tExtent = make_hipExtent(
pb.mz * sizeof(cuRPCF::complex), pb.nx/2+1, pb.ny);
pb.pExtent = make_hipExtent(
2 * (pb.mx / 2 + 1) * sizeof(REAL), pb.my, pb.pz);
// hipExtent & extent = pb.extent;
hipExtent & tExtent = pb.tExtent;
hipExtent & pExtent = pb.pExtent;
// Get pitch value of the pointer.
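	// The hipMalloc3D call below is made only to query the pitch the runtime would pick
	// for this extent; the allocation is freed immediately, and the actual buffers are
	// then taken from the custom allocator set up by initMyCudaMalloc()/myCudaMalloc().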
err = hipMalloc3D(&(pb.dptr_tu), tExtent);
pb.tPitch = pb.dptr_tu.pitch;
safeCudaFree(pb.dptr_tu.ptr);
pb.dptr_tu.ptr = nullptr;
initMyCudaMalloc(dim3(pb.mx, pb.my, pb.mz));
//cuCheck(hipMalloc3D(&(pb.dptr_u), pExtent),"allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_v), pExtent), "allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_w), pExtent), "allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_omega_x), pExtent), "allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_omega_y), pExtent), "allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_omega_z), pExtent), "allocate");
cuCheck(myCudaMalloc(pb.dptr_u, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_v, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_w, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_omega_x, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_omega_y, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_omega_z, XYZ_3D), "allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_lamb_x), extent), "allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_lamb_y), extent), "allocate");
//cuCheck(hipMalloc3D(&(pb.dptr_lamb_z), extent), "allocate");
pb.tSize = pb.tPitch * (pb.nx / 2 + 1) * pb.ny;
// size_t& tsize = pb.tSize;
//pb.nonlinear_v = (cuRPCF::complex*)malloc(tsize);
//pb.nonlinear_v_p = (cuRPCF::complex*)malloc(tsize);
//pb.nonlinear_omega_y = (cuRPCF::complex*)malloc(tsize);
//pb.nonlinear_omega_y_p = (cuRPCF::complex*)malloc(tsize);
//ASSERT(pb.nonlinear_v != nullptr);
//ASSERT(pb.nonlinear_v_p != nullptr);
//ASSERT(pb.nonlinear_omega_y != nullptr);
//ASSERT(pb.nonlinear_omega_y_p != nullptr);
//err = hipMalloc3D(&(pb.dptr_tv), tExtent);
//err = hipMalloc3D(&(pb.dptr_tw), tExtent);
//err = hipMalloc3D(&(pb.dptr_tomega_x), tExtent);
//err = hipMalloc3D(&(pb.dptr_tomega_y), tExtent);
//err = hipMalloc3D(&(pb.dptr_tomega_z), tExtent);
//err = hipMalloc3D(&(pb.dptr_tLamb_x), tExtent);
//err = hipMalloc3D(&(pb.dptr_tLamb_y), tExtent);
//err = hipMalloc3D(&(pb.dptr_tLamb_z), tExtent);
pb.dptr_tu.ptr = nullptr;
pb.dptr_tv.ptr = nullptr;
pb.dptr_tw.ptr = nullptr;
pb.dptr_tomega_x.ptr = nullptr;
pb.dptr_tomega_y.ptr = nullptr;
pb.dptr_tomega_z.ptr = nullptr;
pb.dptr_lamb_x.ptr = nullptr;
pb.dptr_lamb_y.ptr = nullptr;
pb.dptr_lamb_z.ptr = nullptr;
pb.dptr_tLamb_x.ptr = nullptr;
pb.dptr_tLamb_y.ptr = nullptr;
pb.dptr_tLamb_z.ptr = nullptr;
pb.pitch = pb.dptr_u.pitch;
pb.size = pb.pitch * pb.my * pb.mz;
pb.pSize = pb.pitch * pb.my * pb.pz;
ASSERT(!err);
return 0;
}
// note : x and y should be normalized by lx and ly.
// i.e. x = x/lx
#define EPSILON_INIT 0.005
// MARK :: this part is the modified CORRECT initial condition, remove comment mark before use
// ____________________________ BEGIN________________________//
//__device__ REAL _get_init_u(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4*atan(1.0);
// return EPSILON_INIT*lx*sin(PI*z)
// *(cos(2 * PI*x / lx)*sin(2.0*PI*y / ly)
// + 0.5*cos(4.0*PI*x / lx)*sin(2 * PI*y / ly)
// + cos(2 * PI*x / lx)*sin(4 * PI*y / ly));
// //return sin(PI*x)*cos(2*PI*y);
// //return (-2.0 / 3.0 *lx *(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
// // *sin(2.0*PI*y) + sin(4.0*PI*x)
// // *sin(2.0*PI*y) + sin(2.0*PI*x)
// // *sin(4.0*PI*y)));
//}
//
//__device__ REAL _get_init_v(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4 * atan(1.0);
// return -EPSILON_INIT*ly*sin(PI*z)
// *(0.5*sin(2 * PI*x / lx)*cos(2.0*PI*y / ly)
// + 0.5*sin(4.0*PI*x / lx)*cos(2.0 * PI*y / ly)
// + 0.25*sin(2.0 * PI*x / lx)*cos(4.0 * PI*y / ly));
// //return -2.00 / 3.0*(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
//// *sin(2.0*PI*y) + sin(4.0*PI*x)
// // *sin(2.0*PI*y) + sin(2.0*PI*x)
// // *sin(4.0*PI*y));
//}
//
//__device__ REAL _get_init_w(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4 * atan(1.0);
// return EPSILON_INIT*(-1.0)*(1.0+cos(PI*z))
// *(sin(2*PI*x/lx)*sin(2*PI*y/ly)
// +sin(4*PI*x/lx)*sin(2*PI*y/ly)
// +sin(2*PI*x/lx)*sin(4*PI*y/ly));
//
// //return -ly*sin(1.5*PI*z)*(0.5*sin(2.0*PI*x)
// // *cos(2.0*PI*y) + 0.5*sin(4.0*PI*x)
// // *cos(2.0*PI*y) + 0.25*sin(2.0*PI*x)
// // *cos(4.0*PI*y));
//}
//
//__device__ REAL _get_init_omegax(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL pi = 4 * atan(1.0);
// return (-EPSILON_INIT*ly*pi*cos(pi*z)*(0.5*sin(2.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + 0.5*sin(4.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + 0.25*sin(2.0*pi*x/lx)
// *cos(4.0*pi*y/ly)))
//
// -(EPSILON_INIT*(1.0 + cos(pi*z))*4.0*pi / ly*(0.5*sin(2.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + 0.5*sin(4.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + sin(2.0*pi*x/lx)
// *cos(4.0*pi*y/ly)));
//}
//
//__device__ REAL _get_init_omegaz(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL pi = 4 * atan(1.0);
// return EPSILON_INIT*2.0*pi*sin(pi*z)*
// (lx / ly*(cos(2.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +0.5*cos(4.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +2.0*cos(2.0*pi*x/lx)*cos(4.0*pi*y/ly))
// +
// ly / lx*(0.5*cos(2.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +cos(4.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +0.25*cos(2.0*pi*x/lx)*cos(4.0*pi*y/ly)));
//}
//
//
//__device__ REAL _get_init_omegay(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4 * atan(1.0);
// return
// EPSILON_INIT*(-1.0) *(1.0 + cos(PI*z))
// *2*PI/lx*(
// cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
// +2.0*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
// + cos(2 * PI*x / lx)*sin(4 * PI*y / ly))
// -
// EPSILON_INIT*lx*PI*cos(PI*z)*(
// cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
// + 0.5*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
// + cos(2 * PI*x / lx)*sin(4 * PI*y / ly)
// );
//}
//
//
//_____________________________END_______________________________
__device__ REAL _get_init_u(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return EPSILON_INIT*lx*sin(1.5*PI*z)
*(cos(2 * PI*x / lx)*sin(2.0*PI*y / ly)
+ 0.5*cos(4.0*PI*x / lx)*sin(2 * PI*y / ly)
+ cos(2 * PI*x / lx)*sin(4 * PI*y / ly));
//return sin(PI*x)*cos(2*PI*y);
//return (-2.0 / 3.0 *lx *(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
// *sin(2.0*PI*y) + sin(4.0*PI*x)
// *sin(2.0*PI*y) + sin(2.0*PI*x)
// *sin(4.0*PI*y)));
}
__device__ REAL _get_init_v(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return -EPSILON_INIT*ly*sin(1.5*PI*z)
*(0.5*sin(2 * PI*x / lx)*cos(2.0*PI*y / ly)
+ 0.5*sin(4.0*PI*x / lx)*cos(2.0 * PI*y / ly)
+ 0.25*sin(2.0 * PI*x / lx)*cos(4.0 * PI*y / ly));
//return -2.00 / 3.0*(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
// *sin(2.0*PI*y) + sin(4.0*PI*x)
// *sin(2.0*PI*y) + sin(2.0*PI*x)
// *sin(4.0*PI*y));
}
__device__ REAL _get_init_w(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return EPSILON_INIT*(-2.0/3.0)*(1.0 + cos(1.5*PI*z))
*(sin(2 * PI*x / lx)*sin(2 * PI*y / ly)
+ sin(4 * PI*x / lx)*sin(2 * PI*y / ly)
+ sin(2 * PI*x / lx)*sin(4 * PI*y / ly));
//return -ly*sin(1.5*PI*z)*(0.5*sin(2.0*PI*x)
// *cos(2.0*PI*y) + 0.5*sin(4.0*PI*x)
// *cos(2.0*PI*y) + 0.25*sin(2.0*PI*x)
// *cos(4.0*PI*y));
}
__device__ REAL _get_init_omegax(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL pi = 4 * atan(1.0);
return (-EPSILON_INIT*ly*1.5*pi*cos(1.5*pi*z)*(0.5*sin(2.0*pi*x / lx)
*cos(2.0*pi*y / ly) + 0.5*sin(4.0*pi*x / lx)
*cos(2.0*pi*y / ly) + 0.25*sin(2.0*pi*x / lx)
*cos(4.0*pi*y / ly)))
- (2.0/3.0*EPSILON_INIT*(1.0 + cos(1.5*pi*z))*4.0*pi / ly*(0.5*sin(2.0*pi*x / lx)
*cos(2.0*pi*y / ly) + 0.5*sin(4.0*pi*x / lx)
*cos(2.0*pi*y / ly) + sin(2.0*pi*x / lx)
*cos(4.0*pi*y / ly)));
}
__device__ REAL _get_init_omegaz(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL pi = 4 * atan(1.0);
return EPSILON_INIT*2.0*pi*sin(1.5*pi*z)*
(lx / ly*(cos(2.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ 0.5*cos(4.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ 2.0*cos(2.0*pi*x / lx)*cos(4.0*pi*y / ly))
+
ly / lx*(0.5*cos(2.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ cos(4.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ 0.25*cos(2.0*pi*x / lx)*cos(4.0*pi*y / ly)));
}
__device__ REAL _get_init_omegay(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return
EPSILON_INIT*(-1.0) *(1.0 + cos(1.5*PI*z))
* 2 * PI / lx*(
cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
+ 2.0*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
+ cos(2 * PI*x / lx)*sin(4 * PI*y / ly))
-
EPSILON_INIT*lx*PI*1.5*cos(1.5*PI*z)*(
cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
+ 0.5*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
+ cos(2 * PI*x / lx)*sin(4 * PI*y / ly)
);
}
// Compute the initial flow field and save the data to the pointers defined in pb,
// assuming the pointers are already initialized by initCUDA.
__global__ void init_flow_kernel(
REAL* dptr_u, REAL* dptr_v, REAL* dptr_w,
REAL* dptr_ox, REAL* dptr_oy, REAL* dptr_oz,
REAL lx, REAL ly,
int px, int py, int pz, int pitch) {
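	// One thread per (y, z) grid point, sweeping the x direction in a loop. The planes
	// z = 0 and z = pz-1 are the walls and receive zero velocity and vorticity; in the
	// interior, zz = cos(pi*z/(pz-1)) maps the index to Chebyshev-Gauss-Lobatto
	// collocation points in the wall-normal direction.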
int y = threadIdx.x + blockDim.x*blockIdx.x;
int z = threadIdx.y + blockDim.y*blockIdx.y;
if (y >= py || z >= pz) return;
const REAL pi = 4 * atan(1.0);
REAL xx, yy, zz;
REAL* u_row, *v_row, *w_row, *ox_row, *oy_row, *oz_row;
//ASSERT(pitch > 0);
//ASSERT(dptr_u!=nullptr);
size_t inc = pitch*(py*z + y)/sizeof(REAL);
u_row = dptr_u + inc;
v_row = dptr_v + inc;
w_row = dptr_w + inc;
ox_row = dptr_ox + inc;
oy_row = dptr_oy + inc;
oz_row = dptr_oz + inc;
if (z == 0 || z == pz - 1) {
for (int x = 0; x < px; x++) {
u_row[x] = 0.0;
v_row[x] = 0.0;
w_row[x] = 0.0;
ox_row[x] = 0.0;
oy_row[x] = 0.0;
oz_row[x] = 0.0;
}
}
else
{
for (int x = 0; x < px; x++) {
xx = (x*1.0) / px * lx;
yy = (y*1.0) / py * ly;
zz = cos(pi*z / (pz - 1));
u_row[x] = _get_init_u(xx, yy, zz, lx, ly);
v_row[x] = _get_init_v(xx, yy, zz, lx, ly);
w_row[x] = _get_init_w(xx, yy, zz, lx, ly);
ox_row[x] = _get_init_omegax(xx, yy, zz, lx, ly);
oy_row[x] = _get_init_omegay(xx, yy, zz, lx, ly);
oz_row[x] = _get_init_omegaz(xx, yy, zz, lx, ly);
}
}
}
__host__ int initFlow(problem& pb) {
hipError_t err = hipDeviceSynchronize(); // CudaErrorLaunchFailure
ASSERT(err == hipSuccess);
//int nthreadx = 16;
//int nthready = 16;
//int nDimx = pb.py / nthreadx;
//int nDimy = pb.pz / nthready;
//if (pb.py % nthreadx != 0) nDimx++;
//if (pb.pz % nthready != 0) nDimy++;
//dim3 nThread(nthreadx, nthready);
//dim3 nDim(nDimx, nDimy);
hipLaunchKernelGGL(( init_flow_kernel) , dim3(pb.npDim), dim3(pb.nThread), 0, 0, (REAL*)pb.dptr_u.ptr,
(REAL*)pb.dptr_v.ptr, (REAL*)pb.dptr_w.ptr,
(REAL*)pb.dptr_omega_x.ptr, (REAL*)pb.dptr_omega_y.ptr,
(REAL*)pb.dptr_omega_z.ptr,
pb.lx, pb.ly, pb.px, pb.py, pb.nz, pb.dptr_u.pitch);
//system("pause");
err = hipDeviceSynchronize(); // CudaErrorLaunchFailure
ASSERT(err == hipSuccess);
REAL* buffer;
size_t& size = pb.pSize; //pb.dptr_u.pitch*pb.my*pb.mz;
size_t& tSize = pb.tSize;// pb.tPitch*(pb.mx / 2 + 1)*pb.my;
//buffer = (REAL*)malloc(size);
//cuCheck(hipMemcpy(buffer, pb.dptr_u.ptr, size, hipMemcpyDeviceToHost),"memcpy");
//err = hipDeviceSynchronize();
//ASSERT(err == hipSuccess);
//RPCF::write_3d_to_file("init.txt", buffer, pb.dptr_u.pitch, (pb.mx), pb.my, pb.pz);
int dim[3];
dim[0] = pb.mx;
dim[1] = pb.my;
dim[2] = pb.mz;
int tDim[3];
tDim[0] = pb.mz;
tDim[1] = pb.mx;
tDim[2] = pb.my;
transform_3d_one(FORWARD, pb.dptr_omega_z, pb.dptr_tomega_z, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_omega_y, pb.dptr_tomega_y, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_omega_x, pb.dptr_tomega_x, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_w, pb.dptr_tw, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_v, pb.dptr_tv, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_u, pb.dptr_tu, dim, tDim, No_Padding);
	// copy initial rhs_v and rhs_omega_y
cuCheck(hipMemcpy(pb.rhs_v, pb.dptr_tw.ptr, tSize, hipMemcpyDeviceToHost), "memcpy");
cuCheck(hipMemcpy(pb.rhs_omega_y, pb.dptr_tomega_z.ptr, tSize, hipMemcpyDeviceToHost), "memcpy");
getUVW(pb);
for (int k = 0; k < pb.nz; k++) {
pb.tv0[k] = pb.rhs_v[k];
pb.tomega_y_0[k] = pb.rhs_omega_y[k];
}
for (int j = 0; j < pb.ny; j++) {
for (int i = 0; i < (pb.nx / 2 + 1); i++) {
for (int k = 0; k < pb.mz; k++) {
size_t inc = k+pb.tPitch/sizeof(cuRPCF::complex)*(j*(pb.nx / 2 + 1) + i);
pb.rhs_v_p[inc] = pb.rhs_v[inc];
}
}
}
//safeFree(buffer);
return 0;
}
//
//__host__ int computeNonlinear(problem& pb) {
//
// return 0;
//}
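// ddz: wall-normal derivative applied to Chebyshev coefficients. For a series
// u(z) = sum_j a_j T_j(z), the derivative coefficients satisfy
// b_k = sum_{j > k, j - k odd} 2*j*a_j, with the k = 0 coefficient halved --
// which is exactly the double loop implemented below.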
__host__ __device__ void ddz(REAL* u, int N) {
REAL buffer[MAX_NZ*4];
REAL dmat;
for (int i = 0; i < N; i++) {
buffer[i] = 0;
for (int j = i+1; j < N; j=j+2) {
dmat = 2 * (j);
buffer[i] = buffer[i] + dmat * u[j];
}
}
u[0] = buffer[0] * 0.5;
for (int i = 1; i < N; i++) {
u[i] = buffer[i];
}
}
__host__ __device__ void ddz(cuRPCF::complex *u, int N) {
cuRPCF::complex buffer[MAX_NZ];
REAL dmat;
cuRPCF::complex buffer_u[MAX_NZ];
for (int i = 0; i < N; i++) {
buffer_u[i] = u[i];
}
for (int i = 0; i < N; i++) {
buffer[i] = cuRPCF::complex(0.0,0.0);
for (int j = i + 1; j < N; j = j + 2) {
dmat = 2 * REAL(j);
buffer[i] = buffer[i] + buffer_u[j] * dmat;
}
}
u[0] = buffer[0] * 0.5;
for (int i = 1; i < N; i++) {
u[i] = buffer[i];
}
}
__device__ void ddz_sm(REAL* u, int N, int kz) {
REAL buffer;
REAL dmat;
//wait all threads to load data before computing
__syncthreads();
buffer = 0.0;
for (int j = kz + 1; j < N; j = j + 2) {
dmat = 2 * REAL(j);
buffer = buffer + u[j] * dmat;
}
//wait all threads to finish computation before overwriting array.
__syncthreads();
if (kz == 0) {
u[0] = buffer * 0.5;
}
else
{
u[kz] = buffer;
}
}
__device__ void ddz_sm(cuRPCF::complex *u, int N, int kz) {
cuRPCF::complex buffer;
REAL dmat;
//wait all threads to load data before computing
__syncthreads();
buffer = cuRPCF::complex(0.0,0.0);
for (int j = kz + 1; j < N; j = j + 2) {
dmat = 2 * REAL(j);
buffer = buffer + u[j] * dmat;
}
//wait all threads to finish computation before overwriting array.
__syncthreads();
if (kz == 0) {
u[0] = buffer * 0.5;
}
else
{
u[kz] = buffer;
}
}
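// Maps the integer mode indices (kx, ky) to scaled wavenumbers kx/alpha and ky/beta.
// Following the usual FFT storage order, ky indices in the upper half (ky >= ny/2+1)
// represent the negative wavenumbers (ky - ny)/beta.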
__host__ __device__
void get_ialpha_ibeta(int kx, int ky, int ny,
REAL alpha, REAL beta,
REAL& ialpha, REAL& ibeta )
{
ialpha = (REAL)kx / alpha;
ibeta = (REAL)ky / beta;
if (ky >= ny / 2 + 1) {
ibeta = REAL(ky - ny) / beta;
}
}
// This kernel performs a matrix-vector multiply for a batch of matrices (one matrix per thread block).
__global__
void m_multi_v_kernel(cuRPCF::complex* _mat, cuRPCF::complex* _v, const int N, const size_t pitch) {
const int iMat = blockIdx.x;
const int J = threadIdx.x;
const int tid = J;
__shared__ cuRPCF::complex UI[MAX_NZ];
__shared__ cuRPCF::complex buffer[MAX_NZ];
cuRPCF::complex* mat = _mat + iMat*N*N + J*N;
cuRPCF::complex* v = _v + pitch / sizeof(cuRPCF::complex)*iMat;
//cuRPCF::complex mat_cache[MAX_NZ];
//cuRPCF::complex v_cache[MAX_NZ];
//for (int i = 0; i < N; i++) {
// mat_cache[i] = mat[i];
//}
//for (int i = 0; i < N; i++) {
// v_cache[i] = v[i];
//}
//cuRPCF::complex res = cuRPCF::complex(0.0, 0.0);
//for (int k = 0; k < N; k++) {
// res = res + mat_cache[k] * v_cache[k];
//}
cuRPCF::complex res[MAX_NZ];
__shared__ cuRPCF::complex reduction[MAX_NZ];
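	// One block handles one matrix of the batch (iMat = blockIdx.x), one thread one vector
	// entry. For each row i, all threads first form the elementwise products in shared
	// memory and then combine them with a tree reduction (folding in the trailing element
	// whenever the active size is odd); buffer[0] ends up holding the dot product for that row.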
// for each row
for (int i = 0; i < N; i++) {
UI[J] = mat[i*N + J];
buffer[J] = v[J];
__syncthreads();
buffer[J] = UI[J] * buffer[J];
__syncthreads();
if (tid == 0 && N % 2 != 0) buffer[tid] = buffer[tid] + buffer[N - 1];
for (int s = N/2; s>0; s = s / 2)
{
if (tid < s) buffer[tid] = buffer[tid] + buffer[tid + s];
if (tid == 0 && s % 2 != 0) buffer[tid] = buffer[tid] + buffer[s-1];
__syncthreads();
}
res[i] = buffer[0];
}
__syncthreads();
v[J] = res[J];
//cuRPCF::complex res[MAX_NZ];
////cuRPCF::complex* temp = (cuRPCF::complex*)malloc(N*sizeof(cuRPCF::complex));
//for (int i = 0; i < N; i++) {
// UI[J] = mat[i*N + J];
// __syncthreads();
// buffer[J] = 0;
// buffer[J] = UI[J] * v[J];
// if(J == 0){
// for (int j = 1; j < N; j++) {
// buffer[0] = buffer[0] + buffer[j];
// }
// res[i] = buffer[0];
// }
//}
//__syncthreads();
//v[J] = res[J];
}
__host__ hipError_t m_multi_v_gpu(cuRPCF::complex* _mat, cuRPCF::complex* v, const int N, const size_t pitch, const int batch) {
hipLaunchKernelGGL(( m_multi_v_kernel) , dim3(batch), dim3(N) , 0, 0, _mat, v, N, pitch);
return hipDeviceSynchronize();
}
| 0f043e56f945029a21c4d44da49159deec30221c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../include/data.h"
#include "../include/RPCFKernels.cuh"
#include "../include/transform.cuh"
#include "../include/operation.h"
#include "../include/cuRPCF.h"
#include <cstdlib>
#include <math.h>
#include <cassert>
#include <iostream>
#include "../include/rhs.cuh"
#include "../include/transform_multi_gpu.h"
#include "../include/velocity.h"
__host__ void getDeviceInfo(problem& pb) {
cudaDeviceProp prop;
int dev_num;
int n_dev;
size_t free;
size_t total;
launch_subthread(pb);
cudaGetDevice(&dev_num);
cudaGetDeviceProperties(&prop, dev_num);
cudaMemGetInfo(&free, &total);
cudaGetDeviceCount(&n_dev);
//err = cudaDeviceReset();
//ASSERT(err == cudaSuccess);
printf("Using CUDA device %u. Device ID: %s on PCI-E %d\n",
dev_num, prop.name, prop.pciBusID);
printf("GPU total memory = % .2f MB\n", (float)total / (1.024e6));
printf("GPU free memory = % .2f MB\n", (float)free / (1.024e6));
printf("Total device number = :%d\n\n", n_dev);
//for (int i = 0; i < NUM_GPU; i++) {
// dev_id[i] = i%n_dev;
// assert(dev_id[0] == dev_num);
//}
//for (int i = 0; i < n_dev; i++) {
// cudaDeviceEnablePeerAccess(i, 0);
//}
//int accessibleTest;
//cudaDeviceCanAccessPeer(&accessibleTest, dev_id[0], dev_id[1]);
//if (accessibleTest != 1) { std::cerr << "peer access not supported" << std::endl; };
}
__host__ int allocDeviceMem(problem& pb) {
cudaError_t err;
pb.extent = make_cudaExtent(
2*(pb.mx/2+1) * sizeof(REAL), pb.my, pb.mz);
pb.tExtent = make_cudaExtent(
pb.mz * sizeof(cuRPCF::complex), pb.nx/2+1, pb.ny);
pb.pExtent = make_cudaExtent(
2 * (pb.mx / 2 + 1) * sizeof(REAL), pb.my, pb.pz);
// cudaExtent & extent = pb.extent;
cudaExtent & tExtent = pb.tExtent;
cudaExtent & pExtent = pb.pExtent;
// Get pitch value of the pointer.
err = cudaMalloc3D(&(pb.dptr_tu), tExtent);
pb.tPitch = pb.dptr_tu.pitch;
safeCudaFree(pb.dptr_tu.ptr);
pb.dptr_tu.ptr = nullptr;
initMyCudaMalloc(dim3(pb.mx, pb.my, pb.mz));
//cuCheck(cudaMalloc3D(&(pb.dptr_u), pExtent),"allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_v), pExtent), "allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_w), pExtent), "allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_omega_x), pExtent), "allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_omega_y), pExtent), "allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_omega_z), pExtent), "allocate");
cuCheck(myCudaMalloc(pb.dptr_u, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_v, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_w, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_omega_x, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_omega_y, XYZ_3D), "allocate");
cuCheck(myCudaMalloc(pb.dptr_omega_z, XYZ_3D), "allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_lamb_x), extent), "allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_lamb_y), extent), "allocate");
//cuCheck(cudaMalloc3D(&(pb.dptr_lamb_z), extent), "allocate");
pb.tSize = pb.tPitch * (pb.nx / 2 + 1) * pb.ny;
// size_t& tsize = pb.tSize;
//pb.nonlinear_v = (cuRPCF::complex*)malloc(tsize);
//pb.nonlinear_v_p = (cuRPCF::complex*)malloc(tsize);
//pb.nonlinear_omega_y = (cuRPCF::complex*)malloc(tsize);
//pb.nonlinear_omega_y_p = (cuRPCF::complex*)malloc(tsize);
//ASSERT(pb.nonlinear_v != nullptr);
//ASSERT(pb.nonlinear_v_p != nullptr);
//ASSERT(pb.nonlinear_omega_y != nullptr);
//ASSERT(pb.nonlinear_omega_y_p != nullptr);
//err = cudaMalloc3D(&(pb.dptr_tv), tExtent);
//err = cudaMalloc3D(&(pb.dptr_tw), tExtent);
//err = cudaMalloc3D(&(pb.dptr_tomega_x), tExtent);
//err = cudaMalloc3D(&(pb.dptr_tomega_y), tExtent);
//err = cudaMalloc3D(&(pb.dptr_tomega_z), tExtent);
//err = cudaMalloc3D(&(pb.dptr_tLamb_x), tExtent);
//err = cudaMalloc3D(&(pb.dptr_tLamb_y), tExtent);
//err = cudaMalloc3D(&(pb.dptr_tLamb_z), tExtent);
pb.dptr_tu.ptr = nullptr;
pb.dptr_tv.ptr = nullptr;
pb.dptr_tw.ptr = nullptr;
pb.dptr_tomega_x.ptr = nullptr;
pb.dptr_tomega_y.ptr = nullptr;
pb.dptr_tomega_z.ptr = nullptr;
pb.dptr_lamb_x.ptr = nullptr;
pb.dptr_lamb_y.ptr = nullptr;
pb.dptr_lamb_z.ptr = nullptr;
pb.dptr_tLamb_x.ptr = nullptr;
pb.dptr_tLamb_y.ptr = nullptr;
pb.dptr_tLamb_z.ptr = nullptr;
pb.pitch = pb.dptr_u.pitch;
pb.size = pb.pitch * pb.my * pb.mz;
pb.pSize = pb.pitch * pb.my * pb.pz;
ASSERT(!err);
return 0;
}
// note : x and y should be normalized by lx and ly.
// i.e. x = x/lx
#define EPSILON_INIT 0.005
// MARK :: this part is the modified CORRECT initial condition, remove comment mark before use
// ____________________________ BEGIN________________________//
//__device__ REAL _get_init_u(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4*atan(1.0);
// return EPSILON_INIT*lx*sin(PI*z)
// *(cos(2 * PI*x / lx)*sin(2.0*PI*y / ly)
// + 0.5*cos(4.0*PI*x / lx)*sin(2 * PI*y / ly)
// + cos(2 * PI*x / lx)*sin(4 * PI*y / ly));
// //return sin(PI*x)*cos(2*PI*y);
// //return (-2.0 / 3.0 *lx *(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
// // *sin(2.0*PI*y) + sin(4.0*PI*x)
// // *sin(2.0*PI*y) + sin(2.0*PI*x)
// // *sin(4.0*PI*y)));
//}
//
//__device__ REAL _get_init_v(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4 * atan(1.0);
// return -EPSILON_INIT*ly*sin(PI*z)
// *(0.5*sin(2 * PI*x / lx)*cos(2.0*PI*y / ly)
// + 0.5*sin(4.0*PI*x / lx)*cos(2.0 * PI*y / ly)
// + 0.25*sin(2.0 * PI*x / lx)*cos(4.0 * PI*y / ly));
// //return -2.00 / 3.0*(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
//// *sin(2.0*PI*y) + sin(4.0*PI*x)
// // *sin(2.0*PI*y) + sin(2.0*PI*x)
// // *sin(4.0*PI*y));
//}
//
//__device__ REAL _get_init_w(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4 * atan(1.0);
// return EPSILON_INIT*(-1.0)*(1.0+cos(PI*z))
// *(sin(2*PI*x/lx)*sin(2*PI*y/ly)
// +sin(4*PI*x/lx)*sin(2*PI*y/ly)
// +sin(2*PI*x/lx)*sin(4*PI*y/ly));
//
// //return -ly*sin(1.5*PI*z)*(0.5*sin(2.0*PI*x)
// // *cos(2.0*PI*y) + 0.5*sin(4.0*PI*x)
// // *cos(2.0*PI*y) + 0.25*sin(2.0*PI*x)
// // *cos(4.0*PI*y));
//}
//
//__device__ REAL _get_init_omegax(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL pi = 4 * atan(1.0);
// return (-EPSILON_INIT*ly*pi*cos(pi*z)*(0.5*sin(2.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + 0.5*sin(4.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + 0.25*sin(2.0*pi*x/lx)
// *cos(4.0*pi*y/ly)))
//
// -(EPSILON_INIT*(1.0 + cos(pi*z))*4.0*pi / ly*(0.5*sin(2.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + 0.5*sin(4.0*pi*x/lx)
// *cos(2.0*pi*y/ly) + sin(2.0*pi*x/lx)
// *cos(4.0*pi*y/ly)));
//}
//
//__device__ REAL _get_init_omegaz(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL pi = 4 * atan(1.0);
// return EPSILON_INIT*2.0*pi*sin(pi*z)*
// (lx / ly*(cos(2.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +0.5*cos(4.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +2.0*cos(2.0*pi*x/lx)*cos(4.0*pi*y/ly))
// +
// ly / lx*(0.5*cos(2.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +cos(4.0*pi*x/lx)*cos(2.0*pi*y/ly)
// +0.25*cos(2.0*pi*x/lx)*cos(4.0*pi*y/ly)));
//}
//
//
//__device__ REAL _get_init_omegay(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
// const REAL PI = 4 * atan(1.0);
// return
// EPSILON_INIT*(-1.0) *(1.0 + cos(PI*z))
// *2*PI/lx*(
// cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
// +2.0*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
// + cos(2 * PI*x / lx)*sin(4 * PI*y / ly))
// -
// EPSILON_INIT*lx*PI*cos(PI*z)*(
// cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
// + 0.5*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
// + cos(2 * PI*x / lx)*sin(4 * PI*y / ly)
// );
//}
//
//
//_____________________________END_______________________________
__device__ REAL _get_init_u(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return EPSILON_INIT*lx*sin(1.5*PI*z)
*(cos(2 * PI*x / lx)*sin(2.0*PI*y / ly)
+ 0.5*cos(4.0*PI*x / lx)*sin(2 * PI*y / ly)
+ cos(2 * PI*x / lx)*sin(4 * PI*y / ly));
//return sin(PI*x)*cos(2*PI*y);
//return (-2.0 / 3.0 *lx *(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
// *sin(2.0*PI*y) + sin(4.0*PI*x)
// *sin(2.0*PI*y) + sin(2.0*PI*x)
// *sin(4.0*PI*y)));
}
__device__ REAL _get_init_v(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return -EPSILON_INIT*ly*sin(1.5*PI*z)
*(0.5*sin(2 * PI*x / lx)*cos(2.0*PI*y / ly)
+ 0.5*sin(4.0*PI*x / lx)*cos(2.0 * PI*y / ly)
+ 0.25*sin(2.0 * PI*x / lx)*cos(4.0 * PI*y / ly));
//return -2.00 / 3.0*(1.0 + cos(1.5*PI*z))*(sin(2.0*PI*x)
// *sin(2.0*PI*y) + sin(4.0*PI*x)
// *sin(2.0*PI*y) + sin(2.0*PI*x)
// *sin(4.0*PI*y));
}
__device__ REAL _get_init_w(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return EPSILON_INIT*(-2.0/3.0)*(1.0 + cos(1.5*PI*z))
*(sin(2 * PI*x / lx)*sin(2 * PI*y / ly)
+ sin(4 * PI*x / lx)*sin(2 * PI*y / ly)
+ sin(2 * PI*x / lx)*sin(4 * PI*y / ly));
//return -ly*sin(1.5*PI*z)*(0.5*sin(2.0*PI*x)
// *cos(2.0*PI*y) + 0.5*sin(4.0*PI*x)
// *cos(2.0*PI*y) + 0.25*sin(2.0*PI*x)
// *cos(4.0*PI*y));
}
__device__ REAL _get_init_omegax(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL pi = 4 * atan(1.0);
return (-EPSILON_INIT*ly*1.5*pi*cos(1.5*pi*z)*(0.5*sin(2.0*pi*x / lx)
*cos(2.0*pi*y / ly) + 0.5*sin(4.0*pi*x / lx)
*cos(2.0*pi*y / ly) + 0.25*sin(2.0*pi*x / lx)
*cos(4.0*pi*y / ly)))
- (2.0/3.0*EPSILON_INIT*(1.0 + cos(1.5*pi*z))*4.0*pi / ly*(0.5*sin(2.0*pi*x / lx)
*cos(2.0*pi*y / ly) + 0.5*sin(4.0*pi*x / lx)
*cos(2.0*pi*y / ly) + sin(2.0*pi*x / lx)
*cos(4.0*pi*y / ly)));
}
__device__ REAL _get_init_omegaz(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL pi = 4 * atan(1.0);
return EPSILON_INIT*2.0*pi*sin(1.5*pi*z)*
(lx / ly*(cos(2.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ 0.5*cos(4.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ 2.0*cos(2.0*pi*x / lx)*cos(4.0*pi*y / ly))
+
ly / lx*(0.5*cos(2.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ cos(4.0*pi*x / lx)*cos(2.0*pi*y / ly)
+ 0.25*cos(2.0*pi*x / lx)*cos(4.0*pi*y / ly)));
}
__device__ REAL _get_init_omegay(REAL x, REAL y, REAL z, REAL lx, REAL ly) {
const REAL PI = 4 * atan(1.0);
return
EPSILON_INIT*(-1.0) *(1.0 + cos(1.5*PI*z))
* 2 * PI / lx*(
cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
+ 2.0*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
+ cos(2 * PI*x / lx)*sin(4 * PI*y / ly))
-
EPSILON_INIT*lx*PI*1.5*cos(1.5*PI*z)*(
cos(2 * PI*x / lx)*sin(2 * PI*y / ly)
+ 0.5*cos(4 * PI*x / lx)*sin(2 * PI*y / ly)
+ cos(2 * PI*x / lx)*sin(4 * PI*y / ly)
);
}
// compute initial flow, save the data to pointer defined in pb.
// assuming the pointer are already initialized by initCUDA.
__global__ void init_flow_kernel(
REAL* dptr_u, REAL* dptr_v, REAL* dptr_w,
REAL* dptr_ox, REAL* dptr_oy, REAL* dptr_oz,
REAL lx, REAL ly,
int px, int py, int pz, int pitch) {
int y = threadIdx.x + blockDim.x*blockIdx.x;
int z = threadIdx.y + blockDim.y*blockIdx.y;
if (y >= py || z >= pz) return;
const REAL pi = 4 * atan(1.0);
REAL xx, yy, zz;
REAL* u_row, *v_row, *w_row, *ox_row, *oy_row, *oz_row;
//ASSERT(pitch > 0);
//ASSERT(dptr_u!=nullptr);
size_t inc = pitch*(py*z + y)/sizeof(REAL);
u_row = dptr_u + inc;
v_row = dptr_v + inc;
w_row = dptr_w + inc;
ox_row = dptr_ox + inc;
oy_row = dptr_oy + inc;
oz_row = dptr_oz + inc;
if (z == 0 || z == pz - 1) {
for (int x = 0; x < px; x++) {
u_row[x] = 0.0;
v_row[x] = 0.0;
w_row[x] = 0.0;
ox_row[x] = 0.0;
oy_row[x] = 0.0;
oz_row[x] = 0.0;
}
}
else
{
for (int x = 0; x < px; x++) {
xx = (x*1.0) / px * lx;
yy = (y*1.0) / py * ly;
zz = cos(pi*z / (pz - 1));
u_row[x] = _get_init_u(xx, yy, zz, lx, ly);
v_row[x] = _get_init_v(xx, yy, zz, lx, ly);
w_row[x] = _get_init_w(xx, yy, zz, lx, ly);
ox_row[x] = _get_init_omegax(xx, yy, zz, lx, ly);
oy_row[x] = _get_init_omegay(xx, yy, zz, lx, ly);
oz_row[x] = _get_init_omegaz(xx, yy, zz, lx, ly);
}
}
}
__host__ int initFlow(problem& pb) {
cudaError_t err = cudaDeviceSynchronize(); // CudaErrorLaunchFailure
ASSERT(err == cudaSuccess);
//int nthreadx = 16;
//int nthready = 16;
//int nDimx = pb.py / nthreadx;
//int nDimy = pb.pz / nthready;
//if (pb.py % nthreadx != 0) nDimx++;
//if (pb.pz % nthready != 0) nDimy++;
//dim3 nThread(nthreadx, nthready);
//dim3 nDim(nDimx, nDimy);
init_flow_kernel <<<pb.npDim, pb.nThread>>> ((REAL*)pb.dptr_u.ptr,
(REAL*)pb.dptr_v.ptr, (REAL*)pb.dptr_w.ptr,
(REAL*)pb.dptr_omega_x.ptr, (REAL*)pb.dptr_omega_y.ptr,
(REAL*)pb.dptr_omega_z.ptr,
pb.lx, pb.ly, pb.px, pb.py, pb.nz, pb.dptr_u.pitch);
//system("pause");
err = cudaDeviceSynchronize(); // CudaErrorLaunchFailure
ASSERT(err == cudaSuccess);
REAL* buffer;
size_t& size = pb.pSize; //pb.dptr_u.pitch*pb.my*pb.mz;
size_t& tSize = pb.tSize;// pb.tPitch*(pb.mx / 2 + 1)*pb.my;
//buffer = (REAL*)malloc(size);
//cuCheck(cudaMemcpy(buffer, pb.dptr_u.ptr, size, cudaMemcpyDeviceToHost),"memcpy");
//err = cudaDeviceSynchronize();
//ASSERT(err == cudaSuccess);
//RPCF::write_3d_to_file("init.txt", buffer, pb.dptr_u.pitch, (pb.mx), pb.my, pb.pz);
int dim[3];
dim[0] = pb.mx;
dim[1] = pb.my;
dim[2] = pb.mz;
int tDim[3];
tDim[0] = pb.mz;
tDim[1] = pb.mx;
tDim[2] = pb.my;
transform_3d_one(FORWARD, pb.dptr_omega_z, pb.dptr_tomega_z, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_omega_y, pb.dptr_tomega_y, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_omega_x, pb.dptr_tomega_x, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_w, pb.dptr_tw, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_v, pb.dptr_tv, dim, tDim, No_Padding);
transform_3d_one(FORWARD, pb.dptr_u, pb.dptr_tu, dim, tDim, No_Padding);
//copy initial rhs_v and rhs_omega_y
cuCheck(cudaMemcpy(pb.rhs_v, pb.dptr_tw.ptr, tSize, cudaMemcpyDeviceToHost), "memcpy");
cuCheck(cudaMemcpy(pb.rhs_omega_y, pb.dptr_tomega_z.ptr, tSize, cudaMemcpyDeviceToHost), "memcpy");
getUVW(pb);
for (int k = 0; k < pb.nz; k++) {
pb.tv0[k] = pb.rhs_v[k];
pb.tomega_y_0[k] = pb.rhs_omega_y[k];
}
for (int j = 0; j < pb.ny; j++) {
for (int i = 0; i < (pb.nx / 2 + 1); i++) {
for (int k = 0; k < pb.mz; k++) {
size_t inc = k+pb.tPitch/sizeof(cuRPCF::complex)*(j*(pb.nx / 2 + 1) + i);
pb.rhs_v_p[inc] = pb.rhs_v[inc];
}
}
}
//safeFree(buffer);
return 0;
}
//
//__host__ int computeNonlinear(problem& pb) {
//
// return 0;
//}
__host__ __device__ void ddz(REAL* u, int N) {
REAL buffer[MAX_NZ*4];
REAL dmat;
for (int i = 0; i < N; i++) {
buffer[i] = 0;
for (int j = i+1; j < N; j=j+2) {
dmat = 2 * (j);
buffer[i] = buffer[i] + dmat * u[j];
}
}
u[0] = buffer[0] * 0.5;
for (int i = 1; i < N; i++) {
u[i] = buffer[i];
}
}
__host__ __device__ void ddz(cuRPCF::complex *u, int N) {
cuRPCF::complex buffer[MAX_NZ];
REAL dmat;
cuRPCF::complex buffer_u[MAX_NZ];
for (int i = 0; i < N; i++) {
buffer_u[i] = u[i];
}
for (int i = 0; i < N; i++) {
buffer[i] = cuRPCF::complex(0.0,0.0);
for (int j = i + 1; j < N; j = j + 2) {
dmat = 2 * REAL(j);
buffer[i] = buffer[i] + buffer_u[j] * dmat;
}
}
u[0] = buffer[0] * 0.5;
for (int i = 1; i < N; i++) {
u[i] = buffer[i];
}
}
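// Minimal host-side sanity check for the Chebyshev derivative recurrence above.
// This is an illustrative helper, not part of the original solver; it assumes a
// <cstdio>-style printf is available and MAX_NZ >= 4. For f(z) = T_2(z) = 2z^2 - 1
// the derivative is 4*T_1(z), so the coefficients {0,0,1,0} should map to {0,4,0,0}.
__host__ inline void check_ddz_T2() {
REAL c[4] = { 0.0, 0.0, 1.0, 0.0 };
ddz(c, 4);
printf("ddz(T_2): %f %f %f %f (expected 0 4 0 0)\n",
(double)c[0], (double)c[1], (double)c[2], (double)c[3]);
}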
__device__ void ddz_sm(REAL* u, int N, int kz) {
REAL buffer;
REAL dmat;
//wait for all threads to load data before computing
__syncthreads();
buffer = 0.0;
for (int j = kz + 1; j < N; j = j + 2) {
dmat = 2 * REAL(j);
buffer = buffer + u[j] * dmat;
}
//wait for all threads to finish computing before overwriting the array.
__syncthreads();
if (kz == 0) {
u[0] = buffer * 0.5;
}
else
{
u[kz] = buffer;
}
}
__device__ void ddz_sm(cuRPCF::complex *u, int N, int kz) {
cuRPCF::complex buffer;
REAL dmat;
//wait for all threads to load data before computing
__syncthreads();
buffer = cuRPCF::complex(0.0,0.0);
for (int j = kz + 1; j < N; j = j + 2) {
dmat = 2 * REAL(j);
buffer = buffer + u[j] * dmat;
}
//wait for all threads to finish computing before overwriting the array.
__syncthreads();
if (kz == 0) {
u[0] = buffer * 0.5;
}
else
{
u[kz] = buffer;
}
}
__host__ __device__
void get_ialpha_ibeta(int kx, int ky, int ny,
REAL alpha, REAL beta,
REAL& ialpha, REAL& ibeta )
{
ialpha = (REAL)kx / alpha;
ibeta = (REAL)ky / beta;
if (ky >= ny / 2 + 1) {
ibeta = REAL(ky - ny) / beta;
}
}
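// Worked example of the mapping above (illustrative values): with ny = 8 and
// alpha = beta = 1.0, kx = 2 gives ialpha = 2, while ky = 5 (which is >= ny/2 + 1 = 5)
// is wrapped to the aliased negative wavenumber ibeta = (5 - 8) / 1.0 = -3.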
// This kernel performs a batched matrix-vector multiply: one matrix and one vector per block.
__global__
void m_multi_v_kernel(cuRPCF::complex* _mat, cuRPCF::complex* _v, const int N, const size_t pitch) {
const int iMat = blockIdx.x;
const int J = threadIdx.x;
const int tid = J;
__shared__ cuRPCF::complex UI[MAX_NZ];
__shared__ cuRPCF::complex buffer[MAX_NZ];
cuRPCF::complex* mat = _mat + iMat*N*N + J*N;
cuRPCF::complex* v = _v + pitch / sizeof(cuRPCF::complex)*iMat;
//cuRPCF::complex mat_cache[MAX_NZ];
//cuRPCF::complex v_cache[MAX_NZ];
//for (int i = 0; i < N; i++) {
// mat_cache[i] = mat[i];
//}
//for (int i = 0; i < N; i++) {
// v_cache[i] = v[i];
//}
//cuRPCF::complex res = cuRPCF::complex(0.0, 0.0);
//for (int k = 0; k < N; k++) {
// res = res + mat_cache[k] * v_cache[k];
//}
cuRPCF::complex res[MAX_NZ];
__shared__ cuRPCF::complex reduction[MAX_NZ];
// for each row
for (int i = 0; i < N; i++) {
UI[J] = mat[i*N + J];
buffer[J] = v[J];
__syncthreads();
buffer[J] = UI[J] * buffer[J];
__syncthreads();
if (tid == 0 && N % 2 != 0) buffer[tid] = buffer[tid] + buffer[N - 1];
for (int s = N/2; s>0; s = s / 2)
{
if (tid < s) buffer[tid] = buffer[tid] + buffer[tid + s];
__syncthreads();
// fold in the element that the next (halved) stride would drop;
// s == 1 is excluded because the pair sum above already finishes the reduction
if (tid == 0 && s > 1 && s % 2 != 0) buffer[tid] = buffer[tid] + buffer[s-1];
__syncthreads();
}
res[i] = buffer[0];
}
__syncthreads();
v[J] = res[J];
//cuRPCF::complex res[MAX_NZ];
////cuRPCF::complex* temp = (cuRPCF::complex*)malloc(N*sizeof(cuRPCF::complex));
//for (int i = 0; i < N; i++) {
// UI[J] = mat[i*N + J];
// __syncthreads();
// buffer[J] = 0;
// buffer[J] = UI[J] * v[J];
// if(J == 0){
// for (int j = 1; j < N; j++) {
// buffer[0] = buffer[0] + buffer[j];
// }
// res[i] = buffer[0];
// }
//}
//__syncthreads();
//v[J] = res[J];
}
__host__ cudaError_t m_multi_v_gpu(cuRPCF::complex* _mat, cuRPCF::complex* v, const int N, const size_t pitch, const int batch) {
m_multi_v_kernel <<<batch, N >>>(_mat, v, N, pitch);
return cudaDeviceSynchronize();
}
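// Launch-shape sketch for the batched multiply above (illustrative only; dev_mats and
// dev_vecs are placeholder names): with nz Chebyshev modes per pencil and
// (nx/2 + 1) * ny Fourier pencils, a call would look roughly like
//   m_multi_v_gpu(dev_mats, dev_vecs, nz, vec_pitch_bytes, (nx/2 + 1) * ny);
// i.e. one block per pencil and one thread per mode, which requires nz <= MAX_NZ
// and nz not exceeding the device's maximum threads per block.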
|
8d0152fb3b456b746002fbb9f06269102e13400a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "file1.h"
#include "file2.h"
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#else
#define EXPORT
#define IMPORT
#endif
result_type __device__ file1_func(int x);
result_type_dynamic __device__ file2_func(int x);
IMPORT void __host__ cuda_dynamic_lib_func();
static __global__ void mixed_kernel(result_type* r, int x)
{
*r = file1_func(x);
result_type_dynamic rd = file2_func(x);
}
EXPORT int mixed_launch_kernel(int x)
{
cuda_dynamic_lib_func();
result_type* r;
hipError_t err = hipMallocManaged(&r, sizeof(result_type));
if (err != hipSuccess) {
std::cerr << "mixed_launch_kernel: hipMallocManaged failed: "
<< hipGetErrorString(err) << std::endl;
return x;
}
hipLaunchKernelGGL(( mixed_kernel), dim3(1), dim3(1), 0, 0, r, x);
err = hipGetLastError();
if (err != hipSuccess) {
std::cerr << "mixed_kernel [SYNC] failed: " << hipGetErrorString(err)
<< std::endl;
return x;
}
err = hipDeviceSynchronize();
if (err != hipSuccess) {
std::cerr << "mixed_kernel [ASYNC] failed: "
<< hipGetErrorString(hipGetLastError()) << std::endl;
return x;
}
int result = r->sum;
err = hipFree(r);
if (err != hipSuccess) {
std::cerr << "mixed_launch_kernel: hipFree failed: "
<< hipGetErrorString(err) << std::endl;
return x;
}
return result;
}
| 8d0152fb3b456b746002fbb9f06269102e13400a.cu |
#include <iostream>
#include "file1.h"
#include "file2.h"
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#else
#define EXPORT
#define IMPORT
#endif
result_type __device__ file1_func(int x);
result_type_dynamic __device__ file2_func(int x);
IMPORT void __host__ cuda_dynamic_lib_func();
static __global__ void mixed_kernel(result_type* r, int x)
{
*r = file1_func(x);
result_type_dynamic rd = file2_func(x);
}
EXPORT int mixed_launch_kernel(int x)
{
cuda_dynamic_lib_func();
result_type* r;
cudaError_t err = cudaMallocManaged(&r, sizeof(result_type));
if (err != cudaSuccess) {
std::cerr << "mixed_launch_kernel: cudaMallocManaged failed: "
<< cudaGetErrorString(err) << std::endl;
return x;
}
mixed_kernel<<<1, 1>>>(r, x);
err = cudaGetLastError();
if (err != cudaSuccess) {
std::cerr << "mixed_kernel [SYNC] failed: " << cudaGetErrorString(err)
<< std::endl;
return x;
}
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
std::cerr << "mixed_kernel [ASYNC] failed: "
<< cudaGetErrorString(cudaGetLastError()) << std::endl;
return x;
}
int result = r->sum;
err = cudaFree(r);
if (err != cudaSuccess) {
std::cerr << "mixed_launch_kernel: cudaFree failed: "
<< cudaGetErrorString(err) << std::endl;
return x;
}
return result;
}
|
ef1b5047b3149e29f735bd12771ad466aa712e89.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
__global__ void kernNaiveScan(int n, int i, int* dev_idata, int* dev_odata)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n)
{
return;
}
int p = 1 << (i - 1);
if (index >= p)
{
dev_odata[index] = dev_idata[index - p] + dev_idata[index];
}
else
{
dev_odata[index] = dev_idata[index];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
int *dev_idata, *dev_odata;
int m = ilog2ceil(n);
hipMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("hipMalloc dev_idata failed!");
hipMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
timer().startGpuTimer();
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
for (int i = 1; i <= m; i++)
{
kernNaiveScan << <fullBlocksPerGrid, blockSize >> >(n, i, dev_idata, dev_odata);
std::swap(dev_idata, dev_odata);
}
std::swap(dev_idata, dev_odata);
timer().endGpuTimer();
//inclusive->exclusive
hipMemcpy(odata + 1, dev_odata, (n - 1) * sizeof(int), hipMemcpyDeviceToHost);
odata[0] = 0;
hipFree(dev_idata);
hipFree(dev_odata);
}
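// Minimal CPU reference for validating scan() above (an illustrative helper,
// not part of the original StreamCompaction interface): computes the same
// exclusive prefix sum serially.
static void scanCPU(int n, int *odata, const int *idata) {
int sum = 0;
for (int i = 0; i < n; i++) {
odata[i] = sum; // exclusive: position i excludes idata[i]
sum += idata[i];
}
}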
}
}
| ef1b5047b3149e29f735bd12771ad466aa712e89.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
__global__ void kernNaiveScan(int n, int i, int* dev_idata, int* dev_odata)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n)
{
return;
}
int p = 1 << (i - 1);
if (index >= p)
{
dev_odata[index] = dev_idata[index - p] + dev_idata[index];
}
else
{
dev_odata[index] = dev_idata[index];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
int *dev_idata, *dev_odata;
int m = ilog2ceil(n);
cudaMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_idata failed!");
cudaMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
timer().startGpuTimer();
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
for (int i = 1; i <= m; i++)
{
kernNaiveScan << <fullBlocksPerGrid, blockSize >> >(n, i, dev_idata, dev_odata);
std::swap(dev_idata, dev_odata);
}
std::swap(dev_idata, dev_odata);
timer().endGpuTimer();
//inclusive->exclusive
cudaMemcpy(odata + 1, dev_odata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
odata[0] = 0;
cudaFree(dev_idata);
cudaFree(dev_odata);
}
}
}
|
18859821e7f8aa121ac107236178e65a59de9c88.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "im2col.h"
#include "hip/hip_runtime.h"
}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
dim3(BLOCK), 0, get_cuda_stream() ,
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
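// Sizing sketch (an illustrative helper, not part of the original darknet-style API):
// the caller must allocate data_col large enough to hold
// channels * ksize * ksize * height_col * width_col floats.
static inline size_t im2col_gpu_output_size(int channels, int height, int width,
int ksize, int stride, int pad) {
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
return (size_t)channels * ksize * ksize * height_col * width_col;
}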
| 18859821e7f8aa121ac107236178e65a59de9c88.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "im2col.h"
#include "cuda.h"
}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK, 0, get_cuda_stream() >>>(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
|
07a695d5a0baa8190af23a630d5da2d0bda740ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* test.cu */
// Libraries :
# include <stdlib.h>
# include <stdio.h>
# include <string.h>
# include <time.h>
# include <ctime>
# include <math.h>
# include <unistd.h>
# include <iostream>
# include <fstream>
using namespace std;
#define NB_THREADS 512
int number_of_blocks(int n)
{
int res;
res = n/NB_THREADS;
if ( n % NB_THREADS != 0)
res++;
return res;
}
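// Worked example: with NB_THREADS = 512, n = 513 gives 513/512 = 1 with remainder 1,
// so res = 2 blocks; this is the usual ceiling division (n + NB_THREADS - 1) / NB_THREADS.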
__global__ void ker(int *T, int n)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k < n)
T[k] = k;
}
int main()
{
int n = 512;
int e = 9;
int i, nb_blocks;
int *Td, *Th;
for (i=0; i<100; i++)
{
printf("n = %d\ne = %d\n", n, e);
Th = (int*) calloc(n, sizeof(int));
printf(" --> Th calloc done\n");
hipMalloc( (void **) &Td, n * sizeof(int) );
hipDeviceSynchronize();
printf(" --> Td hipMalloc done\n");
hipMemcpy( Td, Th, n*sizeof(int), hipMemcpyHostToDevice );
hipDeviceSynchronize();
printf(" --> hipMemcpy(Td, Th) done\n");
nb_blocks = number_of_blocks(n);
hipLaunchKernelGGL(( ker), dim3(nb_blocks), dim3(NB_THREADS), 0, 0, Td, n);
printf(" --> ker(Td) done\n");
hipMemcpy( Th, Td, n*sizeof(int), hipMemcpyDeviceToHost );
hipDeviceSynchronize();
printf(" --> hipMemcpy(Th, Td) done\n");
free(Th);
hipFree(Td);
n *= 2;
e++;
}
}
| 07a695d5a0baa8190af23a630d5da2d0bda740ec.cu | /* test.cu */
// Libraries :
# include <stdlib.h>
# include <stdio.h>
# include <string.h>
# include <time.h>
# include <ctime>
# include <math.h>
# include <unistd.h>
# include <iostream>
# include <fstream>
using namespace std;
#define NB_THREADS 512
int number_of_blocks(int n)
{
int res;
res = n/NB_THREADS;
if ( n % NB_THREADS != 0)
res++;
return res;
}
__global__ void ker(int *T, int n)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k < n)
T[k] = k;
}
int main()
{
int n = 512;
int e = 9;
int i, nb_blocks;
int *Td, *Th;
for (i=0; i<100; i++)
{
printf("n = %d\ne = %d\n", n, e);
Th = (int*) calloc(n, sizeof(int));
printf(" --> Th calloc done\n");
cudaMalloc( (void **) &Td, n * sizeof(int) );
cudaThreadSynchronize();
printf(" --> Td cudaMalloc done\n");
cudaMemcpy( Td, Th, n*sizeof(int), cudaMemcpyHostToDevice );
cudaThreadSynchronize();
printf(" --> cudaMemcpy(Td, Th) done\n");
nb_blocks = number_of_blocks(n);
ker<<<nb_blocks, NB_THREADS>>>(Td, n);
printf(" --> ker(Td) done\n");
cudaMemcpy( Th, Td, n*sizeof(int), cudaMemcpyDeviceToHost );
cudaThreadSynchronize();
printf(" --> cudaMemcpy(Th, Td) done\n");
free(Th);
cudaFree(Td);
n *= 2;
e++;
}
}
|
a9b87815e431300510dfe57efbf787bd56aaee4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <time.h>
#include <assert.h>
#include <sm_11_atomic_functions.h>
#include <shrUtils.h>
struct position_t
{
int lock;
int diff;
int width;
int height;
};
int frame_width = 1024;
int frame_height = 1024;
int body_width = 32;
int body_height = 32;
int* h_frame = NULL;
int* h_body = NULL;
int* h_diff = NULL;
position_t* h_position = NULL;
int* d_frame = NULL;
int* d_body = NULL;
int* d_diff = NULL;
position_t* d_position = NULL;
int threadsPerBlock;
int blocksPerGrid;
int LOOP_NUM = 1;
unsigned int seed = 0x1234567;
//timespec start_time;
//timespec end_time;
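// body_track: each thread takes one candidate top-left position (j, i) in the frame,
// computes the sum of absolute differences between the body template (staged in
// shared memory) and the overlapping frame window, stores the score in diff, and
// updates pos when it sees a smaller score (note: this last update is not atomic).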
__global__ void body_track(int* frame, int frame_width, int frame_height,
int* body, int body_width, int body_height, int* diff, position_t* pos, int threadsPerBlock)
{
__shared__ int sub_body[1024];
int part_size = body_height*body_width/threadsPerBlock;
for(int m=threadIdx.x*part_size; m < (threadIdx.x+1)*part_size; ++m){
sub_body[m]=body[m];
}
__syncthreads();
int i = (blockIdx.x*threadsPerBlock + threadIdx.x) / frame_width;
int j = (blockIdx.x*threadsPerBlock + threadIdx.x) % frame_width;
if(i >= (frame_height-body_height+1) || j >= (frame_width-body_width+1)){
return;
}
int* sub_frame = frame + i*frame_width + j;
int total_diff = 0;
for(int k = 0; k < body_height; k++)
{
for(int l = 0; l < body_width; l++)
{
int diff = sub_frame[k*frame_width+l] - sub_body[k*body_width+l];
diff = diff < 0 ? diff * -1 : diff;
total_diff += diff;
}
}
diff[i*frame_width+j] = total_diff;
if(total_diff < pos->diff)
{
pos->diff = total_diff;
pos->height = i;
pos->width = j;
}
};
unsigned int myrand(unsigned int *seed, unsigned int input)
{
*seed ^= (*seed << 13) ^ (*seed >> 15) + input;
*seed += (*seed << 17) ^ (*seed >> 14) ^ input;
return *seed;
};
void sig_check()
{
unsigned int sig = 0x1234567;
for(int i = 0; i < frame_height; i++)
for(int j = 0; j < frame_width; j++)
myrand(&sig, h_diff[i*frame_width+j]);
//myrand(&sig, h_position->height);
//myrand(&sig, h_position->width);
printf("Computed check sum signature:0x%08x\n", sig);
if(sig == 0x17dd3971)
printf("Result check by signature successful!!\n");
else
printf("Result check by signature failed!!\n");
}
void show_array(int* array, int width, int height)
{
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++)
printf("%03d, ", array[i*width+j]);
printf("\n");
}
printf("\n");
}
int main (int argc, char *argv[])
{
// read the loop count from the command line
assert(argc == 2);
LOOP_NUM = atoi(argv[1]);
printf("LOOP_NUM:%d\n", LOOP_NUM);
// Allocate the frame, body template, diff and position buffers in host memory
h_frame = (int*)malloc(frame_width*frame_height*sizeof(int));
h_body = (int*)malloc(body_width*body_height*sizeof(int));
h_diff = (int*)malloc(frame_width*frame_height*sizeof(int));
h_position = (position_t*)malloc(sizeof(position_t));
assert(h_frame);
assert(h_body);
assert(h_diff);
assert(h_position);
// initial frame, body, diff
for(int i = 0; i < frame_height; i++)
for(int j = 0; j < frame_width; j++)
{
h_frame[i*frame_width+j] = myrand(&seed, i*j) & 0xff;
h_diff[i*frame_width+j] = 0;
}
for(int i = 0; i < body_height; i++)
for(int j = 0; j < body_width; j++)
{
h_body[i*body_width+j] = myrand(&seed, i*j) & 0xff;
}
h_position->lock = 0;
h_position->diff = 0x7fffffff;
h_position->width = -1;
h_position->height = -1;
//clock_gettime(CLOCK_REALTIME, &start_time);
// Allocate vectors in device memory
cutilSafeCall( hipMalloc((void**)&d_frame, frame_width*frame_height*sizeof(int)) );
cutilSafeCall( hipMalloc((void**)&d_body, body_width*body_height*sizeof(int)) );
cutilSafeCall( hipMalloc((void**)&d_diff, frame_width*frame_height*sizeof(int)) );
cutilSafeCall( hipMalloc((void**)&d_position, sizeof(*h_position)) );
// Copy vectors from host memory to device memory
cutilSafeCall( hipMemcpy(d_frame, h_frame, frame_width*frame_height*sizeof(int), hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_body, h_body, body_width*body_height*sizeof(int), hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_position, h_position, sizeof(*h_position), hipMemcpyHostToDevice) );
// Invoke kernel
threadsPerBlock = 256;
blocksPerGrid = (frame_height*frame_width + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( body_track), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_frame, frame_width, frame_height, d_body, body_width, body_height, d_diff, d_position, threadsPerBlock);
cutilCheckMsg("kernel launch failure");
// Copy results from device memory to host memory
// h_diff and h_position contain the results in host memory
cutilSafeCall( hipMemcpy(h_diff, d_diff, frame_width*frame_height*sizeof(int), hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(h_position, d_position, sizeof(*h_position), hipMemcpyDeviceToHost) );
//clock_gettime(CLOCK_REALTIME, &end_time);
printf("position(%d,%d):%d\n", h_position->width, h_position->height, h_position->diff);
//printf("sizeof(start_time.tv_sec):%d, sizeof(start_time.tv_nsec):%d\n", sizeof(start_time.tv_sec), sizeof(start_time.tv_nsec));
//printf("s_time.tv_sec:%d, s_time.tv_nsec:%d\n", start_time.tv_sec, start_time.tv_nsec);
//printf("e_time.tv_sec:%d, e_time.tv_nsec:%d\n", end_time.tv_sec, end_time.tv_nsec);
//double execution_time = (double)end_time.tv_sec + (double)end_time.tv_nsec/1000000000.0
// - (double)start_time.tv_sec - (double)start_time.tv_nsec/1000000000.0;
//printf("diff_time:%.4f(s)\n", execution_time);
//show_array(h_frame, frame_width, frame_height);
//show_array(h_body, body_width, body_height);
//show_array(h_diff, frame_width, frame_height);
sig_check();
//cutilSafeCall( hipDeviceReset() );
return 0;
}
| a9b87815e431300510dfe57efbf787bd56aaee4c.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <time.h>
#include <assert.h>
#include <sm_11_atomic_functions.h>
#include <shrUtils.h>
struct position_t
{
int lock;
int diff;
int width;
int height;
};
int frame_width = 1024;
int frame_height = 1024;
int body_width = 32;
int body_height = 32;
int* h_frame = NULL;
int* h_body = NULL;
int* h_diff = NULL;
position_t* h_position = NULL;
int* d_frame = NULL;
int* d_body = NULL;
int* d_diff = NULL;
position_t* d_position = NULL;
int threadsPerBlock;
int blocksPerGrid;
int LOOP_NUM = 1;
unsigned int seed = 0x1234567;
//timespec start_time;
//timespec end_time;
__global__ void body_track(int* frame, int frame_width, int frame_height,
int* body, int body_width, int body_height, int* diff, position_t* pos, int threadsPerBlock)
{
__shared__ int sub_body[1024];
int part_size = body_height*body_width/threadsPerBlock;
for(int m=threadIdx.x*part_size; m < (threadIdx.x+1)*part_size; ++m){
sub_body[m]=body[m];
}
__syncthreads();
int i = (blockIdx.x*threadsPerBlock + threadIdx.x) / frame_width;
int j = (blockIdx.x*threadsPerBlock + threadIdx.x) % frame_width;
if(i >= (frame_height-body_height+1) || j >= (frame_width-body_width+1)){
return;
}
int* sub_frame = frame + i*frame_width + j;
int total_diff = 0;
for(int k = 0; k < body_height; k++)
{
for(int l = 0; l < body_width; l++)
{
int diff = sub_frame[k*frame_width+l] - sub_body[k*body_width+l];
diff = diff < 0 ? diff * -1 : diff;
total_diff += diff;
}
}
diff[i*frame_width+j] = total_diff;
if(total_diff < pos->diff)
{
pos->diff = total_diff;
pos->height = i;
pos->width = j;
}
};
unsigned int myrand(unsigned int *seed, unsigned int input)
{
*seed ^= (*seed << 13) ^ (*seed >> 15) + input;
*seed += (*seed << 17) ^ (*seed >> 14) ^ input;
return *seed;
};
void sig_check()
{
unsigned int sig = 0x1234567;
for(int i = 0; i < frame_height; i++)
for(int j = 0; j < frame_width; j++)
myrand(&sig, h_diff[i*frame_width+j]);
//myrand(&sig, h_position->height);
//myrand(&sig, h_position->width);
printf("Computed check sum signature:0x%08x\n", sig);
if(sig == 0x17dd3971)
printf("Result check by signature successful!!\n");
else
printf("Result check by signature failed!!\n");
}
void show_array(int* array, int width, int height)
{
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++)
printf("%03d, ", array[i*width+j]);
printf("\n");
}
printf("\n");
}
int main (int argc, char *argv[])
{
// read the loop count from the command line
assert(argc == 2);
LOOP_NUM = atoi(argv[1]);
printf("LOOP_NUM:%d\n", LOOP_NUM);
// Allocate the frame, body template, diff and position buffers in host memory
h_frame = (int*)malloc(frame_width*frame_height*sizeof(int));
h_body = (int*)malloc(body_width*body_height*sizeof(int));
h_diff = (int*)malloc(frame_width*frame_height*sizeof(int));
h_position = (position_t*)malloc(sizeof(position_t));
assert(h_frame);
assert(h_body);
assert(h_diff);
assert(h_position);
// initial frame, body, diff
for(int i = 0; i < frame_height; i++)
for(int j = 0; j < frame_width; j++)
{
h_frame[i*frame_width+j] = myrand(&seed, i*j) & 0xff;
h_diff[i*frame_width+j] = 0;
}
for(int i = 0; i < body_height; i++)
for(int j = 0; j < body_width; j++)
{
h_body[i*body_width+j] = myrand(&seed, i*j) & 0xff;
}
h_position->lock = 0;
h_position->diff = 0x7fffffff;
h_position->width = -1;
h_position->height = -1;
//clock_gettime(CLOCK_REALTIME, &start_time);
// Allocate vectors in device memory
cutilSafeCall( cudaMalloc((void**)&d_frame, frame_width*frame_height*sizeof(int)) );
cutilSafeCall( cudaMalloc((void**)&d_body, body_width*body_height*sizeof(int)) );
cutilSafeCall( cudaMalloc((void**)&d_diff, frame_width*frame_height*sizeof(int)) );
cutilSafeCall( cudaMalloc((void**)&d_position, sizeof(*h_position)) );
// Copy vectors from host memory to device memory
cutilSafeCall( cudaMemcpy(d_frame, h_frame, frame_width*frame_height*sizeof(int), cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_body, h_body, body_width*body_height*sizeof(int), cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_position, h_position, sizeof(*h_position), cudaMemcpyHostToDevice) );
// Invoke kernel
threadsPerBlock = 256;
blocksPerGrid = (frame_height*frame_width + threadsPerBlock - 1) / threadsPerBlock;
body_track<<<blocksPerGrid, threadsPerBlock>>>(d_frame, frame_width, frame_height, d_body, body_width, body_height, d_diff, d_position, threadsPerBlock);
cutilCheckMsg("kernel launch failure");
// Copy results from device memory to host memory
// h_diff and h_position contain the results in host memory
cutilSafeCall( cudaMemcpy(h_diff, d_diff, frame_width*frame_height*sizeof(int), cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(h_position, d_position, sizeof(*h_position), cudaMemcpyDeviceToHost) );
//clock_gettime(CLOCK_REALTIME, &end_time);
printf("position(%d,%d):%d\n", h_position->width, h_position->height, h_position->diff);
//printf("sizeof(start_time.tv_sec):%d, sizeof(start_time.tv_nsec):%d\n", sizeof(start_time.tv_sec), sizeof(start_time.tv_nsec));
//printf("s_time.tv_sec:%d, s_time.tv_nsec:%d\n", start_time.tv_sec, start_time.tv_nsec);
//printf("e_time.tv_sec:%d, e_time.tv_nsec:%d\n", end_time.tv_sec, end_time.tv_nsec);
//double execution_time = (double)end_time.tv_sec + (double)end_time.tv_nsec/1000000000.0
// - (double)start_time.tv_sec - (double)start_time.tv_nsec/1000000000.0;
//printf("diff_time:%.4f(s)\n", execution_time);
//show_array(h_frame, frame_width, frame_height);
//show_array(h_body, body_width, body_height);
//show_array(h_diff, frame_width, frame_height);
sig_check();
//cutilSafeCall( cudaThreadExit() );
return 0;
}
|
9fa2d13a4a7bedcbf03bc77e8ac6e118e7f248a1.hip | // !!! This is a file automatically generated by hipify!!!
// Inclusion of header files for running CUDA in Visual Studio Pro 2019 (v142)
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Inclusion of the required CUDA libraries and header files
#include <hiprand/hiprand.h>
#include <hip/hip_runtime.h>
// Inclusion of headers from the standard library in C
#include <cstdio>
#include <cstdint> //uint32_t
#include <cstdlib>
#include <ctime>
#include <cmath>
// Inclusion of headers from the C++ STL
#include <any>
#include <iostream>
#include <string>
#include <thread>
using std::cout;
using std::endl;
#if _WIN32
// Windows implementation of the Linux sys/time.h functions needed in this program
#include <sys/timeb.h>
#include <sys/types.h>
#include <winsock2.h>
#define __need_clock_t
/* Structure describing CPU time used by a process and its children. */
struct tms
{
clock_t tms_utime; /* User CPU time. */
clock_t tms_stime; /* System CPU time. */
clock_t tms_cutime; /* User CPU time of dead children. */
clock_t tms_cstime; /* System CPU time of dead children. */
};
// CUDA 8+ requirement
struct timezone
{
int tz_minuteswest; /* minutes west of Greenwich */
int tz_dsttime; /* type of DST correction */
};
int
gettimeofday(struct timeval* t, std::any timezone)
{
struct _timeb timebuffer;
_ftime(&timebuffer);
t->tv_sec = static_cast<long>(timebuffer.time);
t->tv_usec = 1000 * timebuffer.millitm;
return 0;
}
/* Store the CPU time used by this process and all its
dead children (and their dead children) in BUFFER.
Return the elapsed real time, or (clock_t) -1 for errors.
All times are in CLK_TCKths of a second. */
clock_t
times(struct tms* __buffer)
{
__buffer->tms_utime = clock();
__buffer->tms_stime = 0;
__buffer->tms_cstime = 0;
__buffer->tms_cutime = 0;
return __buffer->tms_utime;
}
typedef long long suseconds_t;
#else
#include <sys/time.h>
#endif
// Q_SQRT function using C
float
quick_reverse_sqrt(const float number)
{
const float x2 = number * 0.5F;
const float three_halves = 1.5F;
union {
float f;
uint32_t i;
} conv;
conv.f = number;
conv.i = 0x5f3759df - (conv.i >> 1);
conv.f *= three_halves - (x2 * conv.f * conv.f);
conv.f *= three_halves - (x2 * conv.f * conv.f);
conv.f *= three_halves - (x2 * conv.f * conv.f);
return conv.f;
}
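// For example, quick_reverse_sqrt(4.0f) returns approximately 0.5; with the three
// Newton-Raphson refinement steps above the result is accurate to roughly
// single-precision rounding error.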
// Error codes for any error thrown in the next function
enum QuickInverseSqrtError_t
{
QRSqrtSuccess = 0,
ArrayOutOfBoundsException = 1,
DivideByZero = 2,
UnknownError = 3,
NDoesNotMatchSizeOfArray = 4
};
// Run Q_rsqrt over an array
QuickInverseSqrtError_t
calcOverArray(float* array, size_t N)
{
// Note: the element count cannot be recovered from a decayed pointer via sizeof,
// so the caller-supplied N is trusted here.
for (int i = 0; i < static_cast<int>(N); i++) {
if (array[i] == 0) return DivideByZero;
array[i] = quick_reverse_sqrt(array[i]);
}
return QRSqrtSuccess;
}
// CUDA error check to get error name
std::string
CUDA_CHECK_VAL(hipError_t x)
{
std::string msg;
switch (x) {
case 0:
msg = "hipSuccess";
case 1:
msg = "hipErrorInvalidValue";
case 2:
msg = "hipErrorMemoryAllocation";
case 3:
msg = "hipErrorInitializationError";
case 4:
msg = "hipErrorDeinitialized";
case 5:
msg = "hipErrorProfilerDisabled";
case 9:
msg = "hipErrorInvalidConfiguration";
case 12:
msg = "hipErrorInvalidPitchValue";
case 13:
msg = "hipErrorInvalidSymbol";
case 18:
msg = "hipErrorInvalidTexture";
case 19:
msg = "hipErrorInvalidTextureBinding";
case 20:
msg = "hipErrorInvalidChannelDescriptor";
case 21:
msg = "hipErrorInvalidMemcpyDirection";
case 26:
msg = "hipErrorInvalidFilterSetting";
case 27:
msg = "hipErrorInvalidNormSetting";
case 34:
msg = "cudaErrorStubLibrary";
case 35:
msg = "hipErrorInsufficientDriver";
case 36:
msg = "cudaErrorCallRequiresNewerDriver";
case 37:
msg = "hipErrorInvalidSurface";
case 43:
msg = "hipErrorDuplicateVariableName";
case 44:
msg = "hipErrorDuplicateTextureName";
case 45:
msg = "hipErrorDuplicateSurfaceName";
case 46:
msg = "hipErrorDevicesUnavailable";
case 49:
msg = "hipErrorIncompatibleDriverContext";
case 52:
msg = "hipErrorMissingConfiguration";
case 65:
msg = "hipErrorLaunchMaxDepthExceeded";
case 66:
msg = "hipErrorLaunchFileScopedTex";
case 67:
msg = "hipErrorLaunchFileScopedSurf";
case 68:
msg = "hipErrorSyncDepthExceeded";
case 69:
msg = "hipErrorLaunchPendingCountExceeded";
case 98:
msg = "hipErrorInvalidDeviceFunction";
case 100:
msg = "hipErrorNoDevice";
case 101:
msg = "hipErrorInvalidDevice";
case 102:
msg = "cudaErrorDeviceNotLicensed";
case 103:
msg = "cudaErrorSoftwareValidityNotEstablished";
case 127:
msg = "hipErrorStartupFailure";
case 200:
msg = "hipErrorInvalidImage";
case 201:
msg = "cudaErrorDeviceUninitialized";
case 205:
msg = "hipErrorMapFailed";
case 206:
msg = "hipErrorUnmapFailed";
case 207:
msg = "cudaErrorArrayIsMapped";
case 208:
msg = "cudaErrorAlreadyMapped";
case 209:
msg = "hipErrorNoBinaryForGpu";
case 210:
msg = "cudaErrorAlreadyAcquired";
case 211:
msg = "cudaErrorNotMapped";
case 212:
msg = "cudaErrorNotMappedAsArray";
case 213:
msg = "cudaErrorNotMappedAsPointer";
case 214:
msg = "hipErrorECCNotCorrectable";
case 215:
msg = "hipErrorUnsupportedLimit";
case 216:
msg = "hipErrorDeviceAlreadyInUse";
case 217:
msg = "hipErrorPeerAccessUnsupported";
case 218:
msg = "hipErrorInvalidKernelFile";
case 219:
msg = "hipErrorInvalidGraphicsContext";
case 220:
msg = "hipErrorNvlinkUncorrectable";
case 221:
msg = "cudaErrorJitCompilerNotFound";
case 222:
msg = "cudaErrorUnsupportedPtxVersion";
case 223:
msg = "cudaErrorJitCompilationDisabled";
case 300:
msg = "cudaErrorInvalidSource";
case 301:
msg = "cudaErrorFileNotFound";
case 302:
msg = "hipErrorSharedObjectSymbolNotFound";
case 303:
msg = "hipErrorSharedObjectInitFailed";
case 304:
msg = "hipErrorOperatingSystem";
case 400:
msg = "hipErrorInvalidResourceHandle";
case 401:
msg = "cudaErrorIllegalState";
case 500:
msg = "cudaErrorSymbolNotFound";
case 600:
msg = "hipErrorNotReady";
case 700:
msg = "hipErrorIllegalAddress";
case 701:
msg = "hipErrorLaunchOutOfResources";
case 702:
msg = "hipErrorLaunchTimeOut";
case 703:
msg = "cudaErrorLaunchIncompatibleTexturing";
case 704:
msg = "hipErrorPeerAccessAlreadyEnabled";
case 705:
msg = "hipErrorPeerAccessNotEnabled";
case 708:
msg = "hipErrorSetOnActiveProcess";
case 709:
msg = "hipErrorContextIsDestroyed";
case 710:
msg = "hipErrorAssert";
case 711:
msg = "hipErrorTooManyPeers";
case 712:
msg = "hipErrorHostMemoryAlreadyRegistered";
case 713:
msg = "hipErrorHostMemoryNotRegistered";
case 714:
msg = "hipErrorHardwareStackError";
case 715:
msg = "hipErrorIllegalInstruction";
case 716:
msg = "hipErrorMisalignedAddress";
case 717:
msg = "hipErrorInvalidAddressSpace";
case 718:
msg = "hipErrorInvalidPc";
case 719:
msg = "hipErrorLaunchFailure";
case 720:
msg = "cudaErrorCooperativeLaunchTooLarge";
case 800:
msg = "hipErrorNotPermitted";
case 801:
msg = "hipErrorNotSupported";
case 802:
msg = "cudaErrorSystemNotReady";
case 803:
msg = "cudaErrorSystemDriverMismatch";
case 804:
msg = "cudaErrorCompatNotSupportedOnDevice";
case 900:
msg = "cudaErrorStreamCaptureUnsupported";
case 901:
msg = "cudaErrorStreamCaptureInvalidated";
case 902:
msg = "cudaErrorStreamCaptureMerge";
case 903:
msg = "cudaErrorStreamCaptureUnmatched";
case 904:
msg = "cudaErrorStreamCaptureUnjoined";
case 905:
msg = "cudaErrorStreamCaptureIsolation";
case 906:
msg = "cudaErrorStreamCaptureImplicit";
case 907:
msg = "cudaErrorCapturedEvent";
case 908:
msg = "cudaErrorStreamCaptureWrongThread";
case 909:
msg = "cudaErrorTimeout";
case 910:
msg = "cudaErrorGraphExecUpdateFailure";
case 999:
msg = "hipErrorUnknown";
default:
msg = "NonValidCudaError";
}
return msg;
}
// CURAND error check to get error name
std::string
CURAND_CHECK_VAL(hiprandStatus_t x)
{
std::string msg;
switch (x) {
case 0:
msg = "HIPRAND_STATUS_SUCCESS";
case 100:
msg = "HIPRAND_STATUS_VERSION_MISMATCH";
case 101:
msg = "HIPRAND_STATUS_NOT_INITIALIZED";
case 102:
msg = "HIPRAND_STATUS_ALLOCATION_FAILED";
case 103:
msg = "HIPRAND_STATUS_TYPE_ERROR";
case 104:
msg = "HIPRAND_STATUS_OUT_OF_RANGE";
case 105:
msg = "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
msg = "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
msg = "HIPRAND_STATUS_LAUNCH_FAILURE";
case 202:
msg = "HIPRAND_STATUS_PREEXISTING_FAILURE";
case 203:
msg = "HIPRAND_STATUS_INITIALIZATION_FAILED";
case 204:
msg = "HIPRAND_STATUS_ARCH_MISMATCH";
case 999:
msg = "HIPRAND_STATUS_INTERNAL_ERROR";
default:
msg = "NON_VALID_CURAND_ERROR";
}
return msg;
}
// Check method for checking the error status of a CUDA call
#define CUDA_CALL(x) { if(x != hipSuccess){ cout << "Error: " << CUDA_CHECK_VAL(x) << " at " << __FILE__ << ":" << __LINE__ << endl; return EXIT_FAILURE;}}
// Check method for checking the error status of a cuRAND call
#define CURAND_CALL(x) {if(x != HIPRAND_STATUS_SUCCESS){ cout << "Error: " << CURAND_CHECK_VAL(x) << " at " << __FILE__ << ":" << __LINE__ << endl; return EXIT_FAILURE;}}
// The kernel, which runs on the GPU when called
__global__ void
kernel(const int* a, const int* b, int* c, const size_t N)
{
if (const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N) c[idx] = a[idx] * b[idx];
}
__global__ void
make_float_larger(float* a, size_t N)
{
if (const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N) a[idx] *= 100;
}
// Program to convert a float array to an integer array
__host__ void
f_to_i_array(int* dst, const float* src, const size_t n_elem)
{
for (int i = 0; i < n_elem; i++)
dst[i] = static_cast<int>(src[i] * 1000);
}
// Function for verifying the array generated by the kernel is correct
__host__ bool inline
check(const int* a, const int* b, const size_t size)
{
for (int x = 0; x < size; x++)
{
if (a[x] != b[x]) // exact comparison: both arrays hold integer products
return true;
}
return false;
}
__host__ double
cpu_second()
{
struct timeval tp;
gettimeofday(&tp, nullptr);
return (static_cast<double>(tp.tv_sec) + static_cast<double>(tp.tv_usec) * 1.e-6);
}
int
array_play()
{
float* dev_a;
hiprandGenerator_t test;
const size_t n_size = 50 * sizeof(float);
auto* a = static_cast<float*>(malloc(n_size));
auto* b = static_cast<float*>(malloc(n_size));
CUDA_CALL(hipMalloc(static_cast<float**>(&dev_a), n_size))
CURAND_CALL(hiprandCreateGenerator(&test, HIPRAND_RNG_PSEUDO_DEFAULT))
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(test, time(NULL)))
CURAND_CALL(hiprandGenerateUniform(test, dev_a, 50))
hipLaunchKernelGGL(( make_float_larger), dim3(1), dim3(50), 0, 0, dev_a, n_size);
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(a, dev_a, n_size, hipMemcpyDeviceToHost))
memcpy(b, a, n_size);
if (memcmp(a, b, n_size) == 0) {
for (int i = 0; i < 50; i++)
cout << quick_reverse_sqrt(a[i]) << " = " << 1 / std::sqrt(b[i]) << endl;
}
else
printf("Error, arrays are not the same");
CURAND_CALL(hiprandDestroyGenerator(test))
CUDA_CALL(hipFree(dev_a))
free(a);
free(b);
return 0;
}
// Function for generating the same results as the GPU kernel, used for verification of results
__host__ void
KernelCPUEd(int* a, int* b, int* c, int begin, int end)
{
for (int i = begin; i <= end; i++) // the partition bounds passed in are inclusive
c[i] = a[i] * b[i];
}
// Function for partitioning the array into four equal parts and executing them
void
get_bounds_and_compute(int* a, int* b, int* c, size_t n_elem)
{
struct data {
struct partitionOne {
int begin = 0;
int end = 0;
} partition_one;
struct partitionTwo {
int begin = 0;
int end = 0;
} partition_two;
struct partitionThree {
int begin = 0;
int end = 0;
} partition_three;
struct partitionFour {
int begin = 0;
int end = 0;
} partition_four;
} data_struct;
const int partition_size = static_cast<signed int>(n_elem) / 4;
data_struct.partition_one.begin = 0;
data_struct.partition_one.end = (1 << static_cast<int>(log2(partition_size))) - 1;
data_struct.partition_two.begin = data_struct.partition_one.end + 1;
data_struct.partition_two.end = data_struct.partition_two.begin + partition_size - 1;
data_struct.partition_three.begin = data_struct.partition_two.end + 1;
data_struct.partition_three.end = data_struct.partition_three.begin + partition_size - 1;
data_struct.partition_four.begin = data_struct.partition_three.end + 1;
data_struct.partition_four.end = data_struct.partition_four.begin + partition_size - 1;
std::thread partition_one(KernelCPUEd, a, b, c, data_struct.partition_one.begin, data_struct.partition_one.end);
std::thread partition_two(KernelCPUEd, a, b, c, data_struct.partition_two.begin, data_struct.partition_two.end);
std::thread partition_three(KernelCPUEd, a, b, c, data_struct.partition_three.begin, data_struct.partition_three.end);
std::thread partition_four(KernelCPUEd, a, b, c, data_struct.partition_four.begin, data_struct.partition_four.end);
partition_one.join();
partition_two.join();
partition_three.join();
partition_four.join();
}
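// Worked example of the partitioning above: for n_elem = 1 << 28 each partition holds
// 1 << 26 elements, so the inclusive [begin, end] ranges are [0, 67108863],
// [67108864, 134217727], [134217728, 201326591] and [201326592, 268435455],
// which together cover every index exactly once.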
// Entry point to the program
int
main()
{
size_t nElem = 1 << 28;
size_t nBytes = nElem * sizeof(int);
size_t nBytesF = nElem * sizeof(float);
int* h_A, * h_B, * h_C, * GpuRef;
int* d_A, * d_B, * d_C;
float* devNumGen, * devNumGen2, * h_AR, * h_BR;
int dev = 0;
hipDeviceProp_t deviceProp;
CUDA_CALL(hipGetDeviceProperties(&deviceProp, dev))
printf("Using Device %d: %s\n", dev, deviceProp.name);
CUDA_CALL(hipSetDevice(dev))
hiprandGenerator_t gen, gen2;
// Allocation of memory on the host for transferring data from host to device and vice versa
h_A = static_cast<int*>(malloc(nBytes));
h_B = static_cast<int*>(malloc(nBytes));
h_C = static_cast<int*>(malloc(nBytes));
GpuRef = static_cast<int*>(malloc(nBytes));
// Allocation of memory on the device for storage of data needed by the kernel during runtime
CUDA_CALL(hipMalloc(static_cast<int**>(&d_A), nBytes))
CUDA_CALL(hipMalloc(static_cast<int**>(&d_B), nBytes))
CUDA_CALL(hipMalloc(static_cast<int**>(&d_C), nBytes))
// Allocation of memory on host and device for testing the CUDA number generator
h_AR = static_cast<float*>(malloc(nBytesF));
h_BR = static_cast<float*>(malloc(nBytesF));
CUDA_CALL(hipMalloc(static_cast<float**>(&devNumGen), nBytesF))
CUDA_CALL(hipMalloc(static_cast<float**>(&devNumGen2), nBytesF))
// CUDA number generator function calls and return values
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT))
CURAND_CALL(hiprandCreateGenerator(&gen2, HIPRAND_RNG_PSEUDO_DEFAULT))
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, time(NULL)))
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen2, time(NULL) + 1))
CURAND_CALL(hiprandGenerateUniform(gen, devNumGen, nElem))
CURAND_CALL(hiprandGenerateUniform(gen2, devNumGen2, nElem))
// Transfer random numbers generated on device to host
CUDA_CALL(hipMemcpy(h_AR, devNumGen, nBytesF, hipMemcpyDeviceToHost))
CUDA_CALL(hipMemcpy(h_BR, devNumGen2, nBytesF, hipMemcpyDeviceToHost))
f_to_i_array(h_A, h_AR, nElem);
f_to_i_array(h_B, h_BR, nElem);
// Transfer of populated arrays to the device for use by the kernel
CUDA_CALL(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice))
CUDA_CALL(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice))
// Calculate block indices
int iLen = 1 << 8;
dim3 block(iLen, 1);
dim3 grid((nElem + block.x - 1) / block.x, 1);
// Kernel call to run the calculation on the GPU, using grid.x blocks of block.x (256) threads each
// (the maximum number of threads per block on an RTX 2060 is 1024)
double iStart = cpu_second();
hipLaunchKernelGGL(( kernel) , dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
CUDA_CALL(hipDeviceSynchronize())
double iEnd = cpu_second() - iStart;
printf("Execution time of the GPU kernel <<<%d, %d>>>: %g\n", grid.x, block.x, iEnd);
// CPU verification that the GPU kernel produced the correct results
double iStartCPU = cpu_second();
get_bounds_and_compute(h_A, h_B, h_C, nElem);
double iEndCPU = cpu_second() - iStartCPU;
printf("Execution time of the CPU function %g\n", iEndCPU);
// Transfer of data from Device to the host
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(GpuRef, d_C, nBytes, hipMemcpyDeviceToHost))
// Verification of data: compares the data generated on the host to the data generated on the device
// If the arrays differ, a message is printed and the program continues with cleanup
if (check(h_C, GpuRef, nElem))
{
printf("The arrays are not the same\n");
}
// Destroy the cuRAND number generators
CURAND_CALL(hiprandDestroyGenerator(gen))
CURAND_CALL(hiprandDestroyGenerator(gen2))
//Free device memory
CUDA_CALL(hipFree(d_A))
CUDA_CALL(hipFree(d_B))
CUDA_CALL(hipFree(d_C))
CUDA_CALL(hipFree(devNumGen))
CUDA_CALL(hipFree(devNumGen2))
//Free host memory
free(h_A);
free(h_B);
free(h_C);
free(GpuRef);
free(h_AR);
free(h_BR);
// Allows for the user to see the output when running in Visual Studio Pro 2019 (v142)
char end;
printf("Press Enter to continue");
scanf_s("%c", &end);
return 0;
} | 9fa2d13a4a7bedcbf03bc77e8ac6e118e7f248a1.cu | // Inclusion of header files for running CUDA in Visual Studio Pro 2019 (v142)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Inclusion of the required CUDA libraries and header files
#include <curand.h>
#include <cuda.h>
// Inclusion of headers from the standard library in C
#include <cstdio>
#include <cstdint> //uint32_t
#include <cstdlib>
#include <ctime>
#include <cmath>
// Inclusion of headers from the C++ STL
#include <any>
#include <iostream>
#include <string>
#include <thread>
using std::cout;
using std::endl;
#if _WIN32
// Windows implementation of the Linux sys/time.h functions needed in this program
#include <sys/timeb.h>
#include <sys/types.h>
#include <winsock2.h>
#define __need_clock_t
/* Structure describing CPU time used by a process and its children. */
struct tms
{
clock_t tms_utime; /* User CPU time. */
clock_t tms_stime; /* System CPU time. */
clock_t tms_cutime; /* User CPU time of dead children. */
clock_t tms_cstime; /* System CPU time of dead children. */
};
// CUDA 8+ requirement
struct timezone
{
int tz_minuteswest; /* minutes west of Greenwich */
int tz_dsttime; /* type of DST correction */
};
int
gettimeofday(struct timeval* t, std::any timezone)
{
struct _timeb timebuffer;
_ftime(&timebuffer);
t->tv_sec = static_cast<long>(timebuffer.time);
t->tv_usec = 1000 * timebuffer.millitm;
return 0;
}
/* Store the CPU time used by this process and all its
dead children (and their dead children) in BUFFER.
Return the elapsed real time, or (clock_t) -1 for errors.
All times are in CLK_TCKths of a second. */
clock_t
times(struct tms* __buffer)
{
__buffer->tms_utime = clock();
__buffer->tms_stime = 0;
__buffer->tms_cstime = 0;
__buffer->tms_cutime = 0;
return __buffer->tms_utime;
}
typedef long long suseconds_t;
#else
#include <sys/time.h>
#endif
// Q_SQRT function using C
float
quick_reverse_sqrt(const float number)
{
const float x2 = number * 0.5F;
const float three_halves = 1.5F;
union {
float f;
uint32_t i;
} conv;
conv.f = number;
conv.i = 0x5f3759df - (conv.i >> 1);
conv.f *= three_halves - (x2 * conv.f * conv.f);
conv.f *= three_halves - (x2 * conv.f * conv.f);
conv.f *= three_halves - (x2 * conv.f * conv.f);
return conv.f;
}
// Error codes for any error thrown in the next function
enum QuickInverseSqrtError_t
{
QRSqrtSuccess = 0,
ArrayOutOfBoundsException = 1,
DivideByZero = 2,
UnknownError = 3,
NDoesNotMatchSizeOfArray = 4
};
// Run Q_rsqrt over an array
QuickInverseSqrtError_t
calcOverArray(float* array, size_t N)
{
// Note: the element count cannot be recovered from a decayed pointer via sizeof,
// so the caller-supplied N is trusted here.
for (int i = 0; i < static_cast<int>(N); i++) {
if (array[i] == 0) return DivideByZero;
array[i] = quick_reverse_sqrt(array[i]);
}
return QRSqrtSuccess;
}
// CUDA error check to get error name
std::string
CUDA_CHECK_VAL(cudaError_t x)
{
std::string msg;
switch (x) {
case 0:
msg = "cudaSuccess";
case 1:
msg = "cudaErrorInvalidValue";
case 2:
msg = "cudaErrorMemoryAllocation";
case 3:
msg = "cudaErrorInitializationError";
case 4:
msg = "cudaErrorCudartUnloading";
case 5:
msg = "cudaErrorProfilerDisabled";
case 9:
msg = "cudaErrorInvalidConfiguration";
case 12:
msg = "cudaErrorInvalidPitchValue";
case 13:
msg = "cudaErrorInvalidSymbol";
case 18:
msg = "cudaErrorInvalidTexture";
case 19:
msg = "cudaErrorInvalidTextureBinding";
case 20:
msg = "cudaErrorInvalidChannelDescriptor";
case 21:
msg = "cudaErrorInvalidMemcpyDirection";
case 26:
msg = "cudaErrorInvalidFilterSetting";
case 27:
msg = "cudaErrorInvalidNormSetting";
case 34:
msg = "cudaErrorStubLibrary";
case 35:
msg = "cudaErrorInsufficientDriver";
case 36:
msg = "cudaErrorCallRequiresNewerDriver";
case 37:
msg = "cudaErrorInvalidSurface";
case 43:
msg = "cudaErrorDuplicateVariableName";
case 44:
msg = "cudaErrorDuplicateTextureName";
case 45:
msg = "cudaErrorDuplicateSurfaceName";
case 46:
msg = "cudaErrorDevicesUnavailable";
case 49:
msg = "cudaErrorIncompatibleDriverContext";
case 52:
msg = "cudaErrorMissingConfiguration";
case 65:
msg = "cudaErrorLaunchMaxDepthExceeded";
case 66:
msg = "cudaErrorLaunchFileScopedTex";
case 67:
msg = "cudaErrorLaunchFileScopedSurf";
case 68:
msg = "cudaErrorSyncDepthExceeded";
case 69:
msg = "cudaErrorLaunchPendingCountExceeded";
case 98:
msg = "cudaErrorInvalidDeviceFunction";
case 100:
msg = "cudaErrorNoDevice";
case 101:
msg = "cudaErrorInvalidDevice";
case 102:
msg = "cudaErrorDeviceNotLicensed";
case 103:
msg = "cudaErrorSoftwareValidityNotEstablished";
case 127:
msg = "cudaErrorStartupFailure";
case 200:
msg = "cudaErrorInvalidKernelImage";
case 201:
msg = "cudaErrorDeviceUninitialized";
case 205:
msg = "cudaErrorMapBufferObjectFailed";
case 206:
msg = "cudaErrorUnmapBufferObjectFailed";
case 207:
msg = "cudaErrorArrayIsMapped";
case 208:
msg = "cudaErrorAlreadyMapped";
case 209:
msg = "cudaErrorNoKernelImageForDevice";
case 210:
msg = "cudaErrorAlreadyAcquired";
case 211:
msg = "cudaErrorNotMapped";
case 212:
msg = "cudaErrorNotMappedAsArray";
case 213:
msg = "cudaErrorNotMappedAsPointer";
case 214:
msg = "cudaErrorECCUncorrectable";
case 215:
msg = "cudaErrorUnsupportedLimit";
case 216:
msg = "cudaErrorDeviceAlreadyInUse";
case 217:
msg = "cudaErrorPeerAccessUnsupported";
case 218:
msg = "cudaErrorInvalidPtx";
case 219:
msg = "cudaErrorInvalidGraphicsContext";
case 220:
msg = "cudaErrorNvlinkUncorrectable";
case 221:
msg = "cudaErrorJitCompilerNotFound";
case 222:
msg = "cudaErrorUnsupportedPtxVersion";
case 223:
msg = "cudaErrorJitCompilationDisabled";
case 300:
msg = "cudaErrorInvalidSource";
case 301:
msg = "cudaErrorFileNotFound";
case 302:
msg = "cudaErrorSharedObjectSymbolNotFound";
case 303:
msg = "cudaErrorSharedObjectInitFailed";
case 304:
msg = "cudaErrorOperatingSystem";
case 400:
msg = "cudaErrorInvalidResourceHandle";
case 401:
msg = "cudaErrorIllegalState";
case 500:
msg = "cudaErrorSymbolNotFound";
case 600:
msg = "cudaErrorNotReady";
case 700:
msg = "cudaErrorIllegalAddress";
case 701:
msg = "cudaErrorLaunchOutOfResources";
case 702:
msg = "cudaErrorLaunchTimeout";
case 703:
msg = "cudaErrorLaunchIncompatibleTexturing";
case 704:
msg = "cudaErrorPeerAccessAlreadyEnabled";
case 705:
msg = "cudaErrorPeerAccessNotEnabled";
case 708:
msg = "cudaErrorSetOnActiveProcess";
case 709:
msg = "cudaErrorContextIsDestroyed";
case 710:
msg = "cudaErrorAssert";
case 711:
msg = "cudaErrorTooManyPeers";
case 712:
msg = "cudaErrorHostMemoryAlreadyRegistered";
case 713:
msg = "cudaErrorHostMemoryNotRegistered";
case 714:
msg = "cudaErrorHardwareStackError";
case 715:
msg = "cudaErrorIllegalInstruction";
case 716:
msg = "cudaErrorMisalignedAddress";
case 717:
msg = "cudaErrorInvalidAddressSpace";
case 718:
msg = "cudaErrorInvalidPc";
case 719:
msg = "cudaErrorLaunchFailure";
case 720:
msg = "cudaErrorCooperativeLaunchTooLarge";
case 800:
msg = "cudaErrorNotPermitted";
case 801:
msg = "cudaErrorNotSupported";
case 802:
msg = "cudaErrorSystemNotReady";
case 803:
msg = "cudaErrorSystemDriverMismatch";
case 804:
msg = "cudaErrorCompatNotSupportedOnDevice";
case 900:
msg = "cudaErrorStreamCaptureUnsupported";
case 901:
msg = "cudaErrorStreamCaptureInvalidated";
case 902:
msg = "cudaErrorStreamCaptureMerge";
case 903:
msg = "cudaErrorStreamCaptureUnmatched";
case 904:
msg = "cudaErrorStreamCaptureUnjoined";
case 905:
msg = "cudaErrorStreamCaptureIsolation";
case 906:
msg = "cudaErrorStreamCaptureImplicit";
case 907:
msg = "cudaErrorCapturedEvent";
case 908:
msg = "cudaErrorStreamCaptureWrongThread";
case 909:
msg = "cudaErrorTimeout";
case 910:
msg = "cudaErrorGraphExecUpdateFailure";
case 999:
msg = "cudaErrorUnknown";
default:
msg = "NonValidCudaError";
}
return msg;
}
// CURAND error check to get error name
std::string
CURAND_CHECK_VAL(curandStatus_t x)
{
std::string msg;
switch (x) {
case 0:
msg = "CURAND_STATUS_SUCCESS";
case 100:
msg = "CURAND_STATUS_VERSION_MISMATCH";
case 101:
msg = "CURAND_STATUS_NOT_INITIALIZED";
case 102:
msg = "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
msg = "CURAND_STATUS_TYPE_ERROR";
case 104:
msg = "CURAND_STATUS_OUT_OF_RANGE";
case 105:
msg = "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
msg = "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
msg = "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
msg = "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
msg = "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
msg = "CURAND_STATUS_ARCH_MISMATCH";
case 999:
msg = "CURAND_STATUS_INTERNAL_ERROR";
default:
msg = "NON_VALID_CURAND_ERROR";
}
return msg;
}
// Check method for checking the error status of a CUDA call
#define CUDA_CALL(x) { if(x != cudaSuccess){ cout << "Error: " << CUDA_CHECK_VAL(x) << " at " << __FILE__ << ":" << __LINE__ << endl; return EXIT_FAILURE;}}
// Check method for checking the error status of a cuRAND call
#define CURAND_CALL(x) {if(x != CURAND_STATUS_SUCCESS){ cout << "Error: " << CURAND_CHECK_VAL(x) << " at " << __FILE__ << ":" << __LINE__ << endl; return EXIT_FAILURE;}}
// The kernel, which runs on the GPU when called
__global__ void
kernel(const int* a, const int* b, int* c, const size_t N)
{
if (const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N) c[idx] = a[idx] * b[idx];
}
__global__ void
make_float_larger(float* a, size_t N)
{
if (const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N) a[idx] *= 100;
}
// Convert a float array to an integer array, scaling each value by 1000
__host__ void
f_to_i_array(int* dst, const float* src, const size_t n_elem)
{
for (int i = 0; i < n_elem; i++)
dst[i] = static_cast<int>(src[i] * 1000);
}
// Function for verifying the array generated by the kernel is correct
__host__ bool inline
check(const int* a, const int* b, const size_t size)
{
for (int x = 0; x < size; x++)
{
if (a[x] != b[x]) // exact comparison: both arrays hold integer products
return true;
}
return false;
}
__host__ double
cpu_second()
{
struct timeval tp;
gettimeofday(&tp, nullptr);
return (static_cast<double>(tp.tv_sec) + static_cast<double>(tp.tv_usec) * 1.e-6);
}
int
array_play()
{
float* dev_a;
curandGenerator_t test;
const size_t n_size = 50 * sizeof(float);
auto* a = static_cast<float*>(malloc(n_size));
auto* b = static_cast<float*>(malloc(n_size));
CUDA_CALL(cudaMalloc(static_cast<float**>(&dev_a), n_size))
CURAND_CALL(curandCreateGenerator(&test, CURAND_RNG_PSEUDO_DEFAULT))
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(test, time(NULL)))
CURAND_CALL(curandGenerateUniform(test, dev_a, 50))
make_float_larger<<<1, 50>>>(dev_a, n_size);
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(a, dev_a, n_size, cudaMemcpyDeviceToHost))
memcpy(b, a, n_size);
if (memcmp(a, b, n_size) == 0) {
for (int i = 0; i < 50; i++)
cout << quick_reverse_sqrt(a[i]) << " = " << 1 / std::sqrt(b[i]) << endl;
}
else
printf("Error, arrays are not the same");
CURAND_CALL(curandDestroyGenerator(test))
CUDA_CALL(cudaFree(dev_a))
free(a);
free(b);
return 0;
}
// Function for generating the same results as the GPU kernel, used for verification of results
__host__ void
KernelCPUEd(int* a, int* b, int* c, int begin, int end)
{
for (int i = begin; i <= end; i++) // the partition bounds passed in are inclusive
c[i] = a[i] * b[i];
}
// Function for partitioning the array into four equal parts and executing them
void
get_bounds_and_compute(int* a, int* b, int* c, size_t n_elem)
{
struct data {
struct partitionOne {
int begin = 0;
int end = 0;
} partition_one;
struct partitionTwo {
int begin = 0;
int end = 0;
} partition_two;
struct partitionThree {
int begin = 0;
int end = 0;
} partition_three;
struct partitionFour {
int begin = 0;
int end = 0;
} partition_four;
} data_struct;
const int partition_size = static_cast<signed int>(n_elem) / 4;
data_struct.partition_one.begin = 0;
data_struct.partition_one.end = (1 << static_cast<int>(log2(partition_size))) - 1;
data_struct.partition_two.begin = data_struct.partition_one.end + 1;
data_struct.partition_two.end = data_struct.partition_two.begin + partition_size - 1;
data_struct.partition_three.begin = data_struct.partition_two.end + 1;
data_struct.partition_three.end = data_struct.partition_three.begin + partition_size - 1;
data_struct.partition_four.begin = data_struct.partition_three.end + 1;
data_struct.partition_four.end = data_struct.partition_four.begin + partition_size - 1;
// The partition bounds above are inclusive; KernelCPUEd iterates a half-open [begin, end),
// so pass end + 1 to make sure the last element of each partition is computed.
std::thread partition_one(KernelCPUEd, a, b, c, data_struct.partition_one.begin, data_struct.partition_one.end + 1);
std::thread partition_two(KernelCPUEd, a, b, c, data_struct.partition_two.begin, data_struct.partition_two.end + 1);
std::thread partition_three(KernelCPUEd, a, b, c, data_struct.partition_three.begin, data_struct.partition_three.end + 1);
std::thread partition_four(KernelCPUEd, a, b, c, data_struct.partition_four.begin, data_struct.partition_four.end + 1);
partition_one.join();
partition_two.join();
partition_three.join();
partition_four.join();
}
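// Generalized sketch of the same partitioning scheme for an arbitrary worker count.
// This helper is illustrative only (it is not called by main(), and the name
// compute_with_n_threads is ours): each chunk is a half-open range [begin, end) and the
// last chunk absorbs any remainder, so every element is written exactly once.
void
compute_with_n_threads(int* a, int* b, int* c, const size_t n_elem, const unsigned thread_count)
{
    if (thread_count == 0)
        return;
    std::thread* workers = new std::thread[thread_count];
    const size_t chunk = n_elem / thread_count;
    for (unsigned t = 0; t < thread_count; t++)
    {
        const size_t begin = t * chunk;
        const size_t end = (t == thread_count - 1) ? n_elem : begin + chunk;
        workers[t] = std::thread(KernelCPUEd, a, b, c, static_cast<int>(begin), static_cast<int>(end));
    }
    for (unsigned t = 0; t < thread_count; t++)
        workers[t].join();
    delete[] workers;
}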
// Entry point to the program
int
main()
{
size_t nElem = 1 << 28;
size_t nBytes = nElem * sizeof(int);
size_t nBytesF = nElem * sizeof(float);
int* h_A, * h_B, * h_C, * GpuRef;
int* d_A, * d_B, * d_C;
float* devNumGen, * devNumGen2, * h_AR, * h_BR;
int dev = 0;
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, dev))
printf("Using Device %d: %s\n", dev, deviceProp.name);
CUDA_CALL(cudaSetDevice(dev))
curandGenerator_t gen, gen2;
// Allocation of memory on the host for transferring data from host to device and vice versa
h_A = static_cast<int*>(malloc(nBytes));
h_B = static_cast<int*>(malloc(nBytes));
h_C = static_cast<int*>(malloc(nBytes));
GpuRef = static_cast<int*>(malloc(nBytes));
// Allocation of memory on the device for storage of data needed by the kernel during runtime
CUDA_CALL(cudaMalloc(static_cast<int**>(&d_A), nBytes))
CUDA_CALL(cudaMalloc(static_cast<int**>(&d_B), nBytes))
CUDA_CALL(cudaMalloc(static_cast<int**>(&d_C), nBytes))
// Allocation of memory on host and device for testing the CUDA number generator
h_AR = static_cast<float*>(malloc(nBytesF));
h_BR = static_cast<float*>(malloc(nBytesF));
CUDA_CALL(cudaMalloc(static_cast<float**>(&devNumGen), nBytesF))
CUDA_CALL(cudaMalloc(static_cast<float**>(&devNumGen2), nBytesF))
// CUDA number generator function calls and return values
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT))
CURAND_CALL(curandCreateGenerator(&gen2, CURAND_RNG_PSEUDO_DEFAULT))
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, time(NULL)))
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen2, time(NULL) + 1))
CURAND_CALL(curandGenerateUniform(gen, devNumGen, nElem))
CURAND_CALL(curandGenerateUniform(gen2, devNumGen2, nElem))
// Transfer random numbers generated on device to host
CUDA_CALL(cudaMemcpy(h_AR, devNumGen, nBytesF, cudaMemcpyDeviceToHost))
CUDA_CALL(cudaMemcpy(h_BR, devNumGen2, nBytesF, cudaMemcpyDeviceToHost))
f_to_i_array(h_A, h_AR, nElem);
f_to_i_array(h_B, h_BR, nElem);
// Transfer of populated arrays to the device for use by the kernel
CUDA_CALL(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice))
CUDA_CALL(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice))
// Calculate block indices
int iLen = 1 << 8;
dim3 block(iLen, 1);
dim3 grid((nElem + block.x - 1) / block.x, 1);
// Kernel call to run the calculation on the GPU; the grid size (nElem + block.x - 1) / block.x
// rounds up so a partially filled final block still covers the tail elements
// Max threads per block on an RTX 2060 (compute capability 7.5) is 1024
double iStart = cpu_second();
kernel <<<grid, block>>> (d_A, d_B, d_C, nElem);
CUDA_CALL(cudaDeviceSynchronize())
double iEnd = cpu_second() - iStart;
printf("Execution time of the GPU kernel <<<%d, %d>>>: %g\n", grid.x, block.x, iEnd);
// Verification function that the kernel on the GPU is performing properly
double iStartCPU = cpu_second();
get_bounds_and_compute(h_A, h_B, h_C, nElem);
double iEndCPU = cpu_second() - iStartCPU;
printf("Execution time of the CPU function %g\n", iEndCPU);
// Transfer of data from Device to the host
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(GpuRef, d_C, nBytes, cudaMemcpyDeviceToHost))
// Verification of data, compares data generated on the host to the data generated on the device
// If the data differs, a message is printed; the cleanup of device and host memory below runs either way
if (check(h_C, GpuRef, nElem))
{
printf("The arrays are not the same\n");
}
// Destroy the cuRAND number generator
CURAND_CALL(curandDestroyGenerator(gen))
CURAND_CALL(curandDestroyGenerator(gen2))
//Free device memory
CUDA_CALL(cudaFree(d_A))
CUDA_CALL(cudaFree(d_B))
CUDA_CALL(cudaFree(d_C))
CUDA_CALL(cudaFree(devNumGen))
CUDA_CALL(cudaFree(devNumGen2))
//Free host memory
free(h_A);
free(h_B);
free(h_C);
free(GpuRef);
free(h_AR);
free(h_BR);
// Allows for the user to see the output when running in Visual Studio Pro 2019 (v142)
char end;
printf("Press Enter to continue");
scanf_s("%c", &end, 1);
return 0;
} |
7d2da91e6d9bbf222afaf33188eb890a5ad3d0f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace default_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_A __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_B __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_factor __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_tau1 __attribute__((unused)) = params_.parameters[0];\
auto* _pp_var_tau2 __attribute__((unused)) = params_.parameters[1];\
auto* _pp_var_e __attribute__((unused)) = params_.parameters[2];\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
arb_value_type tp;
_pp_var_A[tid_] = 0.;
_pp_var_B[tid_] = 0.;
tp = _pp_var_tau1[tid_]*_pp_var_tau2[tid_]/(_pp_var_tau2[tid_]-_pp_var_tau1[tid_])*log(_pp_var_tau2[tid_]/_pp_var_tau1[tid_]);
_pp_var_factor[tid_] = 1.0/( -exp( -tp/_pp_var_tau1[tid_])+exp( -tp/_pp_var_tau2[tid_]));
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
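// advance_state: A and B each obey dX/dt = -X/tau; one trapezoidal (Crank-Nicolson) step
// multiplies X by (1 + a*dt/2) / (1 - a*dt/2) with a = -1/tau, which is what ll1_ and ll3_
// compute below.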
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type a_1_, a_0_, ll0_, ll1_, ll2_, ll3_;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
a_0_ = -1.0/_pp_var_tau1[tid_];
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_A[tid_] = _pp_var_A[tid_]*ll1_;
a_1_ = -1.0/_pp_var_tau2[tid_];
ll2_ = a_1_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_B[tid_] = _pp_var_B[tid_]*ll3_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
unsigned lane_mask_ = arb::gpu::ballot(0xffffffff, tid_<n_);
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type current_ = 0;
arb_value_type i = 0;
i = (_pp_var_B[tid_]-_pp_var_A[tid_])*(v-_pp_var_e[tid_]);
current_ = i;
conductivity_ = _pp_var_B[tid_]-_pp_var_A[tid_];
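// Scale by the connection weight and accumulate into the per-compartment conductance and
// current vectors; reduce_by_key combines threads in the warp that target the same node
// index so the concurrent updates remain correct.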
::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*conductivity_,_pp_var_vec_g, node_indexi_, lane_mask_);
::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*current_,_pp_var_vec_i, node_indexi_, lane_mask_);
}
}
__global__
void apply_events(arb_mechanism_ppack params_, arb_deliverable_event_stream stream) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
if(tid_<stream.n_streams) {
auto begin = stream.events + stream.begin[tid_];
auto end = stream.events + stream.end[tid_];
for (auto p = begin; p<end; ++p) {
if (p->mech_id==_pp_var_mechanism_id) {
auto tid_ = p->mech_index;
auto weight = p->weight;
_pp_var_A[tid_] = _pp_var_A[tid_]+weight*_pp_var_factor[tid_];
_pp_var_B[tid_] = _pp_var_B[tid_]+weight*_pp_var_factor[tid_];
}
}
}
}
} // namespace
void mechanism_exp2syn_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
hipLaunchKernelGGL(( multiply), dim3(grid_dim, 2), dim3(block_dim), 0, 0, *p);
}
void mechanism_exp2syn_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_exp2syn_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_exp2syn_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_exp2syn_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_exp2syn_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* stream_ptr) {
auto n = stream_ptr->n_streams;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( apply_events), dim3(grid_dim), dim3(block_dim), 0, 0, *p, *stream_ptr);
}
} // namespace default_catalogue
} // namespace arb
| 7d2da91e6d9bbf222afaf33188eb890a5ad3d0f9.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace default_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_A __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_B __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_factor __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_tau1 __attribute__((unused)) = params_.parameters[0];\
auto* _pp_var_tau2 __attribute__((unused)) = params_.parameters[1];\
auto* _pp_var_e __attribute__((unused)) = params_.parameters[2];\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
arb_value_type tp;
_pp_var_A[tid_] = 0.;
_pp_var_B[tid_] = 0.;
tp = _pp_var_tau1[tid_]*_pp_var_tau2[tid_]/(_pp_var_tau2[tid_]-_pp_var_tau1[tid_])*log(_pp_var_tau2[tid_]/_pp_var_tau1[tid_]);
_pp_var_factor[tid_] = 1.0/( -exp( -tp/_pp_var_tau1[tid_])+exp( -tp/_pp_var_tau2[tid_]));
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
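// advance_state: A and B each obey dX/dt = -X/tau; one trapezoidal (Crank-Nicolson) step
// multiplies X by (1 + a*dt/2) / (1 - a*dt/2) with a = -1/tau, which is what ll1_ and ll3_
// compute below.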
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type a_1_, a_0_, ll0_, ll1_, ll2_, ll3_;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
a_0_ = -1.0/_pp_var_tau1[tid_];
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_A[tid_] = _pp_var_A[tid_]*ll1_;
a_1_ = -1.0/_pp_var_tau2[tid_];
ll2_ = a_1_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_B[tid_] = _pp_var_B[tid_]*ll3_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
unsigned lane_mask_ = arb::gpu::ballot(0xffffffff, tid_<n_);
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type current_ = 0;
arb_value_type i = 0;
i = (_pp_var_B[tid_]-_pp_var_A[tid_])*(v-_pp_var_e[tid_]);
current_ = i;
conductivity_ = _pp_var_B[tid_]-_pp_var_A[tid_];
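// Scale by the connection weight and accumulate into the per-compartment conductance and
// current vectors; reduce_by_key combines threads in the warp that target the same node
// index so the concurrent updates remain correct.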
::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*conductivity_,_pp_var_vec_g, node_indexi_, lane_mask_);
::arb::gpu::reduce_by_key(_pp_var_weight[tid_]*current_,_pp_var_vec_i, node_indexi_, lane_mask_);
}
}
__global__
void apply_events(arb_mechanism_ppack params_, arb_deliverable_event_stream stream) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
if(tid_<stream.n_streams) {
auto begin = stream.events + stream.begin[tid_];
auto end = stream.events + stream.end[tid_];
for (auto p = begin; p<end; ++p) {
if (p->mech_id==_pp_var_mechanism_id) {
auto tid_ = p->mech_index;
auto weight = p->weight;
_pp_var_A[tid_] = _pp_var_A[tid_]+weight*_pp_var_factor[tid_];
_pp_var_B[tid_] = _pp_var_B[tid_]+weight*_pp_var_factor[tid_];
}
}
}
}
} // namespace
void mechanism_exp2syn_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 2}, block_dim>>>(*p);
}
void mechanism_exp2syn_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
compute_currents<<<grid_dim, block_dim>>>(*p);
}
void mechanism_exp2syn_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
void mechanism_exp2syn_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_exp2syn_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_exp2syn_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* stream_ptr) {
auto n = stream_ptr->n_streams;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
apply_events<<<grid_dim, block_dim>>>(*p, *stream_ptr);
}
} // namespace default_catalogue
} // namespace arb
|
15ddaa078776a0f8f910e558f2f8cbb97df19a2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernScanByLevel(const int n, const int offset, int* odata, const int* idata) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
if (index >= offset) {
odata[index] = idata[index] + idata[index - offset];
} else { //final result already found for this position
odata[index] = idata[index];
}
}
__global__ void kernConvertToExclusiveScan(const int n, int* odata, const int* idata) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
if (index == 0) {
odata[index] = 0;
} else {
odata[index] = idata[index - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(const int n, int *odata, const int *idata) {
const int numbytes = n * sizeof(int);
int* dev_odata;
hipMalloc((void**)&dev_odata, numbytes);
checkCUDAError("hipMalloc dev_odata failed!");
int* dev_idata;
hipMalloc((void**)&dev_idata, numbytes);
checkCUDAError("hipMalloc dev_idata failed!");
hipMemcpy(dev_idata, idata, numbytes, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy from idata to dev_idata failed!");
const int gridDim = (n + blockSize - 1) / blockSize;
const int blockDim = blockSize;
timer().startGpuTimer();
for (int offset = 1; offset < n; offset <<= 1) {
//gridDims.x can probably = (n + blockSize - 1 - offset) / blockSize;
hipLaunchKernelGGL(( kernScanByLevel), dim3(gridDim), dim3(blockDim), 0, 0, n, offset, dev_odata, dev_idata);
std::swap(dev_odata, dev_idata);
}
//result is inclusive scan (includes the final reduction sum)
//shift the values right by one and set odata[0] = 0 to get the exclusive scan (identity at index 0, final reduction sum dropped)
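//e.g. idata [3,1,7,0] -> inclusive scan [3,4,11,11] -> exclusive scan [0,3,4,11]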
hipLaunchKernelGGL(( kernConvertToExclusiveScan), dim3(gridDim), dim3(blockDim), 0, 0, n, dev_odata, dev_idata);
timer().endGpuTimer();
hipMemcpy(odata, dev_odata, numbytes, hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy from dev_odata to odata failed!");
hipFree(dev_idata);
checkCUDAError("hipFree of dev_idata failed!");
hipFree(dev_odata);
checkCUDAError("hipFree of dev_odata failed!");
}
}
} | 15ddaa078776a0f8f910e558f2f8cbb97df19a2d.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernScanByLevel(const int n, const int offset, int* odata, const int* idata) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
if (index >= offset) {
odata[index] = idata[index] + idata[index - offset];
} else { //final result already found for this position
odata[index] = idata[index];
}
}
__global__ void kernConvertToExclusiveScan(const int n, int* odata, const int* idata) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
if (index == 0) {
odata[index] = 0;
} else {
odata[index] = idata[index - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(const int n, int *odata, const int *idata) {
const int numbytes = n * sizeof(int);
int* dev_odata;
cudaMalloc((void**)&dev_odata, numbytes);
checkCUDAError("cudaMalloc dev_odata failed!");
int* dev_idata;
cudaMalloc((void**)&dev_idata, numbytes);
checkCUDAError("cudaMalloc dev_idata failed!");
cudaMemcpy(dev_idata, idata, numbytes, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy from idata to dev_idata failed!");
const int gridDim = (n + blockSize - 1) / blockSize;
const int blockDim = blockSize;
timer().startGpuTimer();
for (int offset = 1; offset < n; offset <<= 1) {
//gridDims.x can probably = (n + blockSize - 1 - offset) / blockSize;
kernScanByLevel<<<gridDim, blockDim>>>(n, offset, dev_odata, dev_idata);
std::swap(dev_odata, dev_idata);
}
//result is inclusive scan (includes the final reduction sum)
//shift the values right by one and set odata[0] = 0 to get the exclusive scan (identity at index 0, final reduction sum dropped)
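//e.g. idata [3,1,7,0] -> inclusive scan [3,4,11,11] -> exclusive scan [0,3,4,11]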
kernConvertToExclusiveScan<<<gridDim, blockDim>>>(n, dev_odata, dev_idata);
timer().endGpuTimer();
cudaMemcpy(odata, dev_odata, numbytes, cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy from dev_odata to odata failed!");
cudaFree(dev_idata);
checkCUDAError("cudaFree of dev_idata failed!");
cudaFree(dev_odata);
checkCUDAError("cudaFree of dev_odata failed!");
}
}
} |
3848da6bee3101f759ff4a59c770a61219cbe44c.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::SaveCudaContext{
[&]() {
dh::safe_cuda(hipSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
};
}
template <typename GradientSumT>
void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetRowBatches().begin();
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
auto is_dense = (*dmat)->Info().num_nonzero_ ==
(*dmat)->Info().num_row_ * (*dmat)->Info().num_col_;
size_t row_stride = 0;
const auto &offset_vec = batch.offset.ConstHostVector();
for (size_t i = 1; i < offset_vec.size(); ++i) {
row_stride = std::max(row_stride, offset_vec[i] - offset_vec[i-1]);
}
shard->InitCompressedData(cmat, row_stride, is_dense);
shard->CreateHistIndices(
batch, cmat, RowStateOnDevice(batch.Size(), batch.Size()), -1);
delete dmat;
}
TEST(GpuHist, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
tree::TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols, kNCols);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.ellpack_matrix.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.ellpack_matrix.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * shard.ellpack_matrix.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientSumT> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
int num_symbols = shard.n_bins + 1;
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.data();
dh::safe_cuda(hipMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.size(),
hipMemcpyDeviceToHost));
auto gidx = common::CompressedIterator<uint32_t>(h_gidx_buffer.data(),
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, kNRows);
shard.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(shard.gpair, h_gpair);
thrust::sequence(
thrust::device_pointer_cast(shard.ridx.Current()),
thrust::device_pointer_cast(shard.ridx.Current() + shard.ridx.Size()));
shard.use_shared_memory_histograms = use_shared_memory_histograms;
shard.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = shard.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), data_size,
hipMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
common::HistCutMatrix GetHostCutMatrix () {
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
return cmat;
}
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}
};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard{
new DeviceShard<GradientPairPrecise>(0, 0, 0, kNRows, param, kNCols,
kNCols)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
common::HistCutMatrix cmat = GetHostCutMatrix();
// Copy cut matrix to device.
shard->ba.Allocate(0,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
dh::CopyVectorToDeviceSpan(shard->monotone_constraints,
param.monotone_constraints);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.Data().begin());
shard->column_sampler.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
shard->node_value_constraints.resize(1);
shard->node_value_constraints[0].lower_bound = -1.0;
shard->node_value_constraints[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
shard->EvaluateSplits({ 0,0 }, tree, kNCols);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
TEST(GpuHist, ApplySplit) {
int constexpr kNId = 0;
int constexpr kNRows = 16;
int constexpr kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args = {};
param.InitAllowUnknown(args);
// Initialize shard
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard{
new DeviceShard<GradientPairPrecise>(0, 0, 0, kNRows, param, kNCols,
kNCols)};
shard->ridx_segments.resize(3); // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, kNRows);
shard->ba.Allocate(0, &(shard->ridx), kNRows,
&(shard->position), kNRows);
shard->ellpack_matrix.row_stride = kNCols;
thrust::sequence(
thrust::device_pointer_cast(shard->ridx.Current()),
thrust::device_pointer_cast(shard->ridx.Current() + shard->ridx.Size()));
RegTree tree;
DeviceSplitCandidate candidate;
candidate.Update(2, kLeftDir,
0.59, 4, // fvalue has to be equal to one of the cut field
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = kNId;
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
info.num_nonzero_ = kNRows * kNCols; // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = kNCols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * kNRows,
num_symbols);
shard->ba.Allocate(0, &(shard->gidx_buffer), compressed_size_bytes,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
shard->ellpack_matrix.is_dense = true;
common::CompressedBufferWriter wr(num_symbols);
// gidx 14 should go right, 12 goes left
std::vector<int> h_gidx (kNRows * row_stride, 14);
h_gidx[4] = 12;
h_gidx[12] = 12;
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
wr.Write(h_gidx_compressed.data(), h_gidx.begin(), h_gidx.end());
dh::CopyVectorToDeviceSpan(shard->gidx_buffer, h_gidx_compressed);
shard->ellpack_matrix.gidx_iter = common::CompressedIterator<uint32_t>(
shard->gidx_buffer.data(), num_symbols);
shard->ApplySplit(candidate_entry, &tree);
shard->UpdatePosition(candidate_entry.nid, tree[candidate_entry.nid]);
ASSERT_FALSE(tree[kNId].IsLeaf());
int left_nidx = tree[kNId].LeftChild();
int right_nidx = tree[kNId].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].begin, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].end, 16);
}
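// Verifies that SortPosition groups row indices by their left/right position label, keeps
// each group internally sorted (stable for this sequential input), and preserves every
// (position, ridx) pairing.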
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
thrust::device_vector<int64_t> d_left_count = left_count;
thrust::device_vector<int> position = position_in;
thrust::device_vector<int> position_out(position.size());
thrust::device_vector<bst_uint> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
thrust::device_vector<bst_uint> ridx_out(ridx.size());
dh::CubMemory tmp;
SortPosition(
&tmp, common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<bst_uint>(ridx.data().get(), ridx.size()),
common::Span<bst_uint>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
void TestHistogramIndexImpl(int n_gpus) {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
LearnerTrainParam learner_param(CreateEmptyGenericParam(0, n_gpus));
hist_maker.Init(training_params, &learner_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Init(training_params, &learner_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
ASSERT_EQ(hist_maker.shards_.size(), hist_maker_ext.shards_.size());
// Extract the device shards from the histogram makers and from that its compressed
// histogram index
for (size_t i = 0; i < hist_maker.shards_.size(); ++i) {
const auto &dev_shard = hist_maker.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer(dev_shard->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, dev_shard->gidx_buffer);
const auto &dev_shard_ext = hist_maker_ext.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer_ext(dev_shard_ext->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer_ext, dev_shard_ext->gidx_buffer);
ASSERT_EQ(dev_shard->n_bins, dev_shard_ext->n_bins);
ASSERT_EQ(dev_shard->gidx_buffer.size(), dev_shard_ext->gidx_buffer.size());
ASSERT_EQ(h_gidx_buffer, h_gidx_buffer_ext);
}
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl(1);
}
#if defined(XGBOOST_USE_NCCL)
TEST(GpuHist, MGPU_TestHistogramIndex) {
auto devices = GPUSet::AllVisible();
CHECK_GT(devices.Size(), 1);
TestHistogramIndexImpl(-1);
}
#endif
} // namespace tree
} // namespace xgboost
| 3848da6bee3101f759ff4a59c770a61219cbe44c.cu | /*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::SaveCudaContext{
[&]() {
dh::safe_cuda(cudaSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
};
}
template <typename GradientSumT>
void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetRowBatches().begin();
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
auto is_dense = (*dmat)->Info().num_nonzero_ ==
(*dmat)->Info().num_row_ * (*dmat)->Info().num_col_;
size_t row_stride = 0;
const auto &offset_vec = batch.offset.ConstHostVector();
for (size_t i = 1; i < offset_vec.size(); ++i) {
row_stride = std::max(row_stride, offset_vec[i] - offset_vec[i-1]);
}
shard->InitCompressedData(cmat, row_stride, is_dense);
shard->CreateHistIndices(
batch, cmat, RowStateOnDevice(batch.Size(), batch.Size()), -1);
delete dmat;
}
TEST(GpuHist, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
tree::TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols, kNCols);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.ellpack_matrix.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.ellpack_matrix.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * shard.ellpack_matrix.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientSumT> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
int num_symbols = shard.n_bins + 1;
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.data();
dh::safe_cuda(cudaMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.size(),
cudaMemcpyDeviceToHost));
auto gidx = common::CompressedIterator<uint32_t>(h_gidx_buffer.data(),
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, kNRows);
shard.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(shard.gpair, h_gpair);
thrust::sequence(
thrust::device_pointer_cast(shard.ridx.Current()),
thrust::device_pointer_cast(shard.ridx.Current() + shard.ridx.Size()));
shard.use_shared_memory_histograms = use_shared_memory_histograms;
shard.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = shard.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
common::HistCutMatrix GetHostCutMatrix () {
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
return cmat;
}
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}
};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard{
new DeviceShard<GradientPairPrecise>(0, 0, 0, kNRows, param, kNCols,
kNCols)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
common::HistCutMatrix cmat = GetHostCutMatrix();
// Copy cut matrix to device.
shard->ba.Allocate(0,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
dh::CopyVectorToDeviceSpan(shard->monotone_constraints,
param.monotone_constraints);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.Data().begin());
shard->column_sampler.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
shard->node_value_constraints.resize(1);
shard->node_value_constraints[0].lower_bound = -1.0;
shard->node_value_constraints[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
shard->EvaluateSplits({ 0,0 }, tree, kNCols);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
TEST(GpuHist, ApplySplit) {
int constexpr kNId = 0;
int constexpr kNRows = 16;
int constexpr kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args = {};
param.InitAllowUnknown(args);
// Initialize shard
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard{
new DeviceShard<GradientPairPrecise>(0, 0, 0, kNRows, param, kNCols,
kNCols)};
shard->ridx_segments.resize(3); // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, kNRows);
shard->ba.Allocate(0, &(shard->ridx), kNRows,
&(shard->position), kNRows);
shard->ellpack_matrix.row_stride = kNCols;
thrust::sequence(
thrust::device_pointer_cast(shard->ridx.Current()),
thrust::device_pointer_cast(shard->ridx.Current() + shard->ridx.Size()));
RegTree tree;
DeviceSplitCandidate candidate;
candidate.Update(2, kLeftDir,
0.59, 4, // fvalue has to be equal to one of the cut field
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = kNId;
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
info.num_nonzero_ = kNRows * kNCols; // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = kNCols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * kNRows,
num_symbols);
shard->ba.Allocate(0, &(shard->gidx_buffer), compressed_size_bytes,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
shard->ellpack_matrix.is_dense = true;
common::CompressedBufferWriter wr(num_symbols);
// gidx 14 should go right, 12 goes left
std::vector<int> h_gidx (kNRows * row_stride, 14);
h_gidx[4] = 12;
h_gidx[12] = 12;
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
wr.Write(h_gidx_compressed.data(), h_gidx.begin(), h_gidx.end());
dh::CopyVectorToDeviceSpan(shard->gidx_buffer, h_gidx_compressed);
shard->ellpack_matrix.gidx_iter = common::CompressedIterator<uint32_t>(
shard->gidx_buffer.data(), num_symbols);
shard->ApplySplit(candidate_entry, &tree);
shard->UpdatePosition(candidate_entry.nid, tree[candidate_entry.nid]);
ASSERT_FALSE(tree[kNId].IsLeaf());
int left_nidx = tree[kNId].LeftChild();
int right_nidx = tree[kNId].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].begin, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].end, 16);
}
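// Verifies that SortPosition groups row indices by their left/right position label, keeps
// each group internally sorted (stable for this sequential input), and preserves every
// (position, ridx) pairing.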
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
thrust::device_vector<int64_t> d_left_count = left_count;
thrust::device_vector<int> position = position_in;
thrust::device_vector<int> position_out(position.size());
thrust::device_vector<bst_uint> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
thrust::device_vector<bst_uint> ridx_out(ridx.size());
dh::CubMemory tmp;
SortPosition(
&tmp, common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<bst_uint>(ridx.data().get(), ridx.size()),
common::Span<bst_uint>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
void TestHistogramIndexImpl(int n_gpus) {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
LearnerTrainParam learner_param(CreateEmptyGenericParam(0, n_gpus));
hist_maker.Init(training_params, &learner_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Init(training_params, &learner_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
ASSERT_EQ(hist_maker.shards_.size(), hist_maker_ext.shards_.size());
// Extract the device shards from the histogram makers and from that its compressed
// histogram index
for (size_t i = 0; i < hist_maker.shards_.size(); ++i) {
const auto &dev_shard = hist_maker.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer(dev_shard->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, dev_shard->gidx_buffer);
const auto &dev_shard_ext = hist_maker_ext.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer_ext(dev_shard_ext->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer_ext, dev_shard_ext->gidx_buffer);
ASSERT_EQ(dev_shard->n_bins, dev_shard_ext->n_bins);
ASSERT_EQ(dev_shard->gidx_buffer.size(), dev_shard_ext->gidx_buffer.size());
ASSERT_EQ(h_gidx_buffer, h_gidx_buffer_ext);
}
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl(1);
}
#if defined(XGBOOST_USE_NCCL)
TEST(GpuHist, MGPU_TestHistogramIndex) {
auto devices = GPUSet::AllVisible();
CHECK_GT(devices.Size(), 1);
TestHistogramIndexImpl(-1);
}
#endif
} // namespace tree
} // namespace xgboost
|