hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
6d801d33619a541e2e5dcc41ea1bd7bcb05b8d82.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <hip/hip_runtime.h>
#define NUM_CYCLES 500
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x*blockDim.x+threadIdx.x; // handle the data at this index
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int deviceCount, device;
int blocks,threads,n;
double time_s;
long start_time,end_time;
struct hipDeviceProp_t properties;
int *a, *b, *c;
struct timeval start,stop;
int *dev_a, *dev_b, *dev_c;
hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount);
if (cudaResultCode != hipSuccess)
deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties(&properties, device);
if (properties.major != 9999) /* 9999 means emulation only */
if (device==0)
{
printf("multiProcessorCount %d\n",properties.multiProcessorCount);
printf("maxThreadsPerMultiProcessor %d\n",properties.maxThreadsPerMultiProcessor);
blocks=properties.multiProcessorCount;
threads=properties.maxThreadsPerMultiProcessor;
n=properties.multiProcessorCount * properties.maxThreadsPerMultiProcessor;
}
}
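// Note: maxThreadsPerMultiProcessor (often 1536-2048) can exceed the per-block limit
// maxThreadsPerBlock (typically 1024), so using it directly as the launch block size
// may fail on some GPUs; n is also left uninitialized when no device is found.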
a=(int*)malloc(n * sizeof(int));
b=(int*)malloc(n * sizeof(int));
c=(int*)malloc(n * sizeof(int));
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, n * sizeof(int) );
hipMalloc( (void**)&dev_b, n * sizeof(int) );
hipMalloc( (void**)&dev_c, n * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
hipMemcpy( dev_a, a, n * sizeof(int),hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, n * sizeof(int),hipMemcpyHostToDevice );
gettimeofday(&start,NULL);
int l;
start_time=start.tv_sec*1000000 + start.tv_usec;//get start time
for(l=0;l<NUM_CYCLES;l++)
hipLaunchKernelGGL(( add), dim3(blocks),dim3(threads), 0, 0, dev_a, dev_b, dev_c );
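// Note: kernel launches are asynchronous, so without a hipDeviceSynchronize() before
// the stop timestamp the measured interval largely reflects launch overhead only.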
gettimeofday(&stop,NULL);
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, n * sizeof(int),hipMemcpyDeviceToHost );
// display the results
// for (int i=0; i<N; i++) {
// printf( "%d + %d = %d\n", a[i], b[i], c[i] );
// }
// free the memory allocated on the GPU
time_s=end_time-start_time;
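// Note: start_time/end_time come from gettimeofday in microseconds, so time_s is the
// elapsed time in microseconds; dividing by 1e6 would convert it to seconds before
// forming the GFLOPS figure below.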
printf("Time taken: %lf",time_s);
printf("GFLOPS: %lf",(double)(NUM_CYCLES*n*3)/(time_s*1000000000));
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
}
|
6d801d33619a541e2e5dcc41ea1bd7bcb05b8d82.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include <cuda.h>
#define NUM_CYCLES 500
__global__ void add( int *a, int *b, int *c ) {
int tid = blockIdx.x*blockDim.x+threadIdx.x; // handle the data at this index
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int deviceCount, device;
int blocks,threads,n;
double time_s;
long start_time,end_time;
struct cudaDeviceProp properties;
int *a, *b, *c;
struct timeval start,stop;
int *dev_a, *dev_b, *dev_c;
cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount);
if (cudaResultCode != cudaSuccess)
deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties(&properties, device);
if (properties.major != 9999) /* 9999 means emulation only */
if (device==0)
{
printf("multiProcessorCount %d\n",properties.multiProcessorCount);
printf("maxThreadsPerMultiProcessor %d\n",properties.maxThreadsPerMultiProcessor);
blocks=properties.multiProcessorCount;
threads=properties.maxThreadsPerMultiProcessor;
n=properties.multiProcessorCount * properties.maxThreadsPerMultiProcessor;
}
}
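// Note: maxThreadsPerMultiProcessor (often 1536-2048) can exceed the per-block limit
// maxThreadsPerBlock (typically 1024), so using it directly as the launch block size
// may fail on some GPUs; n is also left uninitialized when no device is found.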
a=(int*)malloc(n * sizeof(int));
b=(int*)malloc(n * sizeof(int));
c=(int*)malloc(n * sizeof(int));
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, n * sizeof(int) );
cudaMalloc( (void**)&dev_b, n * sizeof(int) );
cudaMalloc( (void**)&dev_c, n * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy( dev_a, a, n * sizeof(int),cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, n * sizeof(int),cudaMemcpyHostToDevice );
gettimeofday(&start,NULL);
int l;
start_time=start.tv_sec*1000000 + start.tv_usec;//get start time
for(l=0;l<NUM_CYCLES;l++)
add<<<blocks,threads>>>( dev_a, dev_b, dev_c );
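// Note: kernel launches are asynchronous, so without a cudaDeviceSynchronize() before
// the stop timestamp the measured interval largely reflects launch overhead only.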
gettimeofday(&stop,NULL);
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, n * sizeof(int),cudaMemcpyDeviceToHost );
// display the results
// for (int i=0; i<N; i++) {
// printf( "%d + %d = %d\n", a[i], b[i], c[i] );
// }
// free the memory allocated on the GPU
time_s=end_time-start_time;
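// Note: start_time/end_time come from gettimeofday in microseconds, so time_s is the
// elapsed time in microseconds; dividing by 1e6 would convert it to seconds before
// forming the GFLOPS figure below.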
printf("Time taken: %lf",time_s);
printf("GFLOPS: %lf",(double)(NUM_CYCLES*n*3)/(time_s*1000000000));
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
|
64cf625c406e984237b5232ca1ed2fef633d9a11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "common/fmt.hpp"
#include "common/utils.hpp"
#include "hiprand/hiprand_kernel.h"
struct Vec {
double x, y, z; // position, also color (r,g,b)
__device__ __host__ Vec operator+(const Vec &b) const {
Vec v;
v.x = x+b.x;
v.y = y+b.y;
v.z = z+b.z;
return v;
}
__device__ __host__ Vec operator-(const Vec &b) const {
Vec v;
v.x = x - b.x;
v.y = y - b.y;
v.z = z - b.z;
return v;
/* return Vec(x-b.x,y-b.y,z-b.z); */
}
__device__ __host__ Vec operator*(double b) const {
Vec v;
v.x = x * b;
v.y = y * b;
v.z = z * b;
return v;
/* return Vec(x*b,y*b,z*b); */
}
__device__ __host__ Vec operator%(Vec&b){
Vec v;
v.x = y * b.z - z * b.y;
v.y = z * b.x - x * b.z;
v.z = x * b.y - y * b.x;
return v;
/* return Vec(y*b.z-z*b.y,z*b.x-x*b.z,x*b.y-y*b.x); */
}
__device__ __host__ Vec mult(const Vec &b) const {
Vec v;
v.x = x * b.x;
v.y = y * b.y;
v.z = z * b.z;
return v;
/* return Vec(x*b.x,y*b.y,z*b.z); */
}
__device__ __host__ Vec& norm() { return *this = *this * (1/sqrt(x*x+y*y+z*z)); }
__device__ __host__ double dot(const Vec &b) const { return x*b.x+y*b.y+z*b.z; } // cross:
};
struct Ray { Vec o, d; };
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance()
struct Sphere {
double rad; // radius
Vec p, e, c; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ __host__ double intersect(const Ray &r) const { // returns distance, 0 if nohit
Vec op = p-r.o; // Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
double t, eps=1e-4, b=op.dot(r.d), det=b*b-op.dot(op)+rad*rad;
if (det<0) return 0; else det=sqrt(det);
return (t=b-det)>eps ? t : ((t=b+det)>eps ? t : 0);
}
};
__device__ __host__ Vec new_vec(double x_=0, double y_=0, double z_=0) {
Vec v;
v.x = x_;
v.y = y_;
v.z = z_;
return v;
}
__device__ __host__ Ray new_ray(Vec o_, Vec d_) {
Ray r;
r.o = o_;
r.d = d_;
return r;
}
__device__ __host__ Sphere new_sphere(double rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) {
Sphere s;
s.rad = rad_;
s.p = p_;
s.e = e_;
s.c = c_;
s.refl = refl_;
return s;
}
// CUDA FUNCTIONS ===========================================================
#define NUM_SPHERES 9
static __constant__ Sphere SPHERES[NUM_SPHERES];
__device__ __host__ inline double clamp(double x) {
return x<0 ? 0 : x>1 ? 1 : x;
}
int toInt(double x) {
return int(pow(clamp(x),1/2.2)*255+.5);
}
__device__ bool intersect(const Ray &r, double &t, int &id) {
int n = NUM_SPHERES;
double d;
double inf = t = 1e20;
for(int i = int(n); i--;)
if( (d = SPHERES[i].intersect(r)) && d<t ) {
t=d;
id=i;
}
return t < inf;
}
#define STACK_SIZE 100
__device__ Vec linear_radiance(const Ray &r_, int depth_, hiprandState_t *Xi){
double t; // distance to intersection
int id=0; // id of intersected object
Ray r=r_;
int depth=depth_;
// L0 = Le0 + f0*(L1)
// = Le0 + f0*(Le1 + f1*L2)
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(L3))
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(Le3 + f3*(L4)))
// = ...
// = Le0 + f0*Le1 + f0*f1*Le2 + f0*f1*f2*Le3 + f0*f1*f2*f3*Le4 + ...
//
// So:
// F = 1
// while (1){
// L += F*Lei
// F *= fi
// }
Vec cl = new_vec(0,0,0); // accumulated color
Vec cf = new_vec(1,1,1); // accumulated reflectance
while (1){
if (!intersect(r, t, id)) return cl; // if miss, return black
const Sphere &obj = SPHERES[id]; // the hit object
Vec x=r.o+r.d*t, n=(x-obj.p).norm(), nl=n.dot(r.d)<0?n:n*-1, f=obj.c;
double p = f.x>f.y && f.x>f.z ? f.x : f.y>f.z ? f.y : f.z; // max refl
cl = cl + cf.mult(obj.e);
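// Russian roulette: beyond depth 5 the path survives with probability p (the largest
// color component) and the surviving throughput is scaled by 1/p to keep the estimate unbiased.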
if (++depth>5) if (hiprand_uniform(Xi)<p) f=f*(1/p); else return cl; //R.R.
cf = cf.mult(f);
if (obj.refl == DIFF){ // Ideal DIFFUSE reflection
double r1=2*M_PI*hiprand_uniform(Xi), r2=hiprand_uniform(Xi), r2s=sqrt(r2);
Vec w=nl, u=((fabs(w.x)>.1? new_vec(0,1):new_vec(1))%w).norm(), v=w%u;
Vec d = (u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrt(1-r2)).norm();
//return obj.e + f.mult(radiance(Ray(x,d),depth,Xi));
r = new_ray(x,d);
continue;
} else if (obj.refl == SPEC){ // Ideal SPECULAR reflection
//return obj.e + f.mult(radiance(Ray(x,r.d-n*2*n.dot(r.d)),depth,Xi));
r = new_ray(x,r.d-n*2*n.dot(r.d));
continue;
}
Ray reflRay = new_ray(x, r.d-n*2*n.dot(r.d)); // Ideal dielectric REFRACTION
bool into = n.dot(nl)>0; // Ray from outside going in?
double nc=1, nt=1.5, nnt=into?nc/nt:nt/nc, ddn=r.d.dot(nl), cos2t;
if ((cos2t=1-nnt*nnt*(1-ddn*ddn))<0){ // Total internal reflection
//return obj.e + f.mult(radiance(reflRay,depth,Xi));
r = reflRay;
continue;
}
Vec tdir = (r.d*nnt - n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm();
double a=nt-nc, b=nt+nc, R0=a*a/(b*b), c = 1-(into?-ddn:tdir.dot(n));
double Re=R0+(1-R0)*c*c*c*c*c,Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP=Tr/(1-P);
// return obj.e + f.mult(hiprand_uniform(Xi)<P ?
// radiance(reflRay, depth,Xi)*RP:
// radiance(Ray(x,tdir),depth,Xi)*TP);
if (hiprand_uniform(Xi)<P){
cf = cf*RP;
r = reflRay;
} else {
cf = cf*TP;
r = new_ray(x,tdir);
}
continue;
}
}
__global__ void calc_pixel(Vec *out, int samps) {
// Calculates a single pixel in the final image
// Returns a color vector that is later written to the final image.
hiprandState_t state;
int w=1024, h=768;
Ray cam = new_ray(new_vec(50,52,295.6), new_vec(0,-0.042612,-1).norm()); // cam pos, dir
Vec cx = new_vec(w*.5135/h), cy = (cx%cam.d).norm()*.5135;
int t = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(t, t, 0, &state);
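// Each thread seeds its own RNG with its global index (also used as the sequence number);
// the loop below then makes thread t shade four pixels, one in each quarter-height band
// of the 1024x768 image.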
for(int idx = 0; idx < 4; idx++){
int y = t/w + (idx*(h/4));
int x = t%w;
int i = (h-y-1) * w + x;
for (int sy = 0; sy < 2; sy++) { // 2x2 subpixel rows
for (int sx = 0; sx < 2; sx++) { // 2x2 subpixel cols
Vec r = new_vec();
for (int s = 0; s < samps; s++) {
double r1 = 2*hiprand_uniform(&state), dx=r1<1 ? sqrt(r1)-1: 1-sqrt(2-r1);
double r2 = 2*hiprand_uniform(&state), dy=r2<1 ? sqrt(r2)-1: 1-sqrt(2-r2);
Vec d = cx*( ( (sx+.5 + dx)/2 + x)/w - .5) +
cy*( ( (sy+.5 + dy)/2 + y)/h - .5) + cam.d;
r = r + linear_radiance(new_ray(cam.o+d.norm()*140,d.norm()),0, &state) * (1./samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
out[i] = out[i] + new_vec(clamp(r.x),clamp(r.y),clamp(r.z))*.25;
}
}
}
}
int main(int argc, char *argv[]) {
float BLOCK_SIZE = 512;
timer_start("Starting program."); //@@ start a timer
Sphere *spheres = (Sphere *)malloc(NUM_SPHERES * sizeof(Sphere));
spheres[0] = new_sphere(1e5, new_vec( 1e5+1,40.8,81.6), new_vec(),new_vec(.75,.25,.25),DIFF);//Left
spheres[1] = new_sphere(1e5, new_vec(-1e5+99,40.8,81.6), new_vec(),new_vec(.25,.25,.75),DIFF);//Rght
spheres[2] = new_sphere(1e5, new_vec(50,40.8, 1e5), new_vec(),new_vec(.75,.75,.75),DIFF);//Back
spheres[3] = new_sphere(1e5, new_vec(50,40.8,-1e5+170), new_vec(),new_vec(), DIFF);//Frnt
spheres[4] = new_sphere(1e5, new_vec(50, 1e5, 81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Botm
spheres[5] = new_sphere(1e5, new_vec(50,-1e5+81.6,81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Top
spheres[6] = new_sphere(16.5, new_vec(27,16.5,47), new_vec(),new_vec(1,1,1)*.999, SPEC);//Mirr
spheres[7] = new_sphere(16.5, new_vec(73,16.5,78), new_vec(),new_vec(1,1,1)*.999, REFR);//Glas
spheres[8] = new_sphere(600, new_vec(50,681.6-.27,81.6), new_vec(12,12,12), new_vec(), DIFF);//Lite
// Copy the spheres to constant memory
hipMemcpyToSymbol(SPHERES, spheres, NUM_SPHERES * sizeof(Sphere));
int w=1024, h=768; // image dimensions
int samps = argc==2 ? atoi(argv[1])/4 : 250;
Vec *host_out = (Vec *)malloc(sizeof(Vec) * w * h);
Vec *device_out;
hipMalloc((void **) &device_out, sizeof(Vec) * w * h);
printf("This is Chris's 1-D optimization.\n");
printf("Render starting!\nBlock size is %i\n", int(BLOCK_SIZE));
dim3 grid(ceil((w*h)/(4*BLOCK_SIZE)), 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( calc_pixel), dim3(grid), dim3(block), 0, 0, device_out, samps);
hipDeviceSynchronize();
hipEventRecord(stop);
hipMemcpy(host_out, device_out, sizeof(Vec) * w * h, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
hipFree(device_out);
float milliseconds;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Image rendered in %f milliseconds!\n", milliseconds);
FILE *f = fopen("image.ppm", "w"); // Write image to PPM file.
fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
for (int i=0; i<w*h; i++)
fprintf(f,"%d %d %d ", toInt(host_out[i].x), toInt(host_out[i].y), toInt(host_out[i].z));
fclose(f);
free(host_out);
free(spheres);
timer_stop();
return 0;
}
|
64cf625c406e984237b5232ca1ed2fef633d9a11.cu
|
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "common/fmt.hpp"
#include "common/utils.hpp"
#include "curand_kernel.h"
struct Vec {
double x, y, z; // position, also color (r,g,b)
__device__ __host__ Vec operator+(const Vec &b) const {
Vec v;
v.x = x+b.x;
v.y = y+b.y;
v.z = z+b.z;
return v;
}
__device__ __host__ Vec operator-(const Vec &b) const {
Vec v;
v.x = x - b.x;
v.y = y - b.y;
v.z = z - b.z;
return v;
/* return Vec(x-b.x,y-b.y,z-b.z); */
}
__device__ __host__ Vec operator*(double b) const {
Vec v;
v.x = x * b;
v.y = y * b;
v.z = z * b;
return v;
/* return Vec(x*b,y*b,z*b); */
}
__device__ __host__ Vec operator%(Vec&b){
Vec v;
v.x = y * b.z - z * b.y;
v.y = z * b.x - x * b.z;
v.z = x * b.y - y * b.x;
return v;
/* return Vec(y*b.z-z*b.y,z*b.x-x*b.z,x*b.y-y*b.x); */
}
__device__ __host__ Vec mult(const Vec &b) const {
Vec v;
v.x = x * b.x;
v.y = y * b.y;
v.z = z * b.z;
return v;
/* return Vec(x*b.x,y*b.y,z*b.z); */
}
__device__ __host__ Vec& norm() { return *this = *this * (1/sqrt(x*x+y*y+z*z)); }
__device__ __host__ double dot(const Vec &b) const { return x*b.x+y*b.y+z*b.z; } // cross:
};
struct Ray { Vec o, d; };
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance()
struct Sphere {
double rad; // radius
Vec p, e, c; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ __host__ double intersect(const Ray &r) const { // returns distance, 0 if nohit
Vec op = p-r.o; // Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
double t, eps=1e-4, b=op.dot(r.d), det=b*b-op.dot(op)+rad*rad;
if (det<0) return 0; else det=sqrt(det);
return (t=b-det)>eps ? t : ((t=b+det)>eps ? t : 0);
}
};
__device__ __host__ Vec new_vec(double x_=0, double y_=0, double z_=0) {
Vec v;
v.x = x_;
v.y = y_;
v.z = z_;
return v;
}
__device__ __host__ Ray new_ray(Vec o_, Vec d_) {
Ray r;
r.o = o_;
r.d = d_;
return r;
}
__device__ __host__ Sphere new_sphere(double rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) {
Sphere s;
s.rad = rad_;
s.p = p_;
s.e = e_;
s.c = c_;
s.refl = refl_;
return s;
}
// CUDA FUNCTIONS ===========================================================
#define NUM_SPHERES 9
static __constant__ Sphere SPHERES[NUM_SPHERES];
__device__ __host__ inline double clamp(double x) {
return x<0 ? 0 : x>1 ? 1 : x;
}
int toInt(double x) {
return int(pow(clamp(x),1/2.2)*255+.5);
}
__device__ bool intersect(const Ray &r, double &t, int &id) {
int n = NUM_SPHERES;
double d;
double inf = t = 1e20;
for(int i = int(n); i--;)
if( (d = SPHERES[i].intersect(r)) && d<t ) {
t=d;
id=i;
}
return t < inf;
}
#define STACK_SIZE 100
__device__ Vec linear_radiance(const Ray &r_, int depth_, curandState *Xi){
double t; // distance to intersection
int id=0; // id of intersected object
Ray r=r_;
int depth=depth_;
// L0 = Le0 + f0*(L1)
// = Le0 + f0*(Le1 + f1*L2)
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(L3))
// = Le0 + f0*(Le1 + f1*(Le2 + f2*(Le3 + f3*(L4)))
// = ...
// = Le0 + f0*Le1 + f0*f1*Le2 + f0*f1*f2*Le3 + f0*f1*f2*f3*Le4 + ...
//
// So:
// F = 1
// while (1){
// L += F*Lei
// F *= fi
// }
Vec cl = new_vec(0,0,0); // accumulated color
Vec cf = new_vec(1,1,1); // accumulated reflectance
while (1){
if (!intersect(r, t, id)) return cl; // if miss, return black
const Sphere &obj = SPHERES[id]; // the hit object
Vec x=r.o+r.d*t, n=(x-obj.p).norm(), nl=n.dot(r.d)<0?n:n*-1, f=obj.c;
double p = f.x>f.y && f.x>f.z ? f.x : f.y>f.z ? f.y : f.z; // max refl
cl = cl + cf.mult(obj.e);
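// Russian roulette: beyond depth 5 the path survives with probability p (the largest
// color component) and the surviving throughput is scaled by 1/p to keep the estimate unbiased.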
if (++depth>5) if (curand_uniform(Xi)<p) f=f*(1/p); else return cl; //R.R.
cf = cf.mult(f);
if (obj.refl == DIFF){ // Ideal DIFFUSE reflection
double r1=2*M_PI*curand_uniform(Xi), r2=curand_uniform(Xi), r2s=sqrt(r2);
Vec w=nl, u=((fabs(w.x)>.1? new_vec(0,1):new_vec(1))%w).norm(), v=w%u;
Vec d = (u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrt(1-r2)).norm();
//return obj.e + f.mult(radiance(Ray(x,d),depth,Xi));
r = new_ray(x,d);
continue;
} else if (obj.refl == SPEC){ // Ideal SPECULAR reflection
//return obj.e + f.mult(radiance(Ray(x,r.d-n*2*n.dot(r.d)),depth,Xi));
r = new_ray(x,r.d-n*2*n.dot(r.d));
continue;
}
Ray reflRay = new_ray(x, r.d-n*2*n.dot(r.d)); // Ideal dielectric REFRACTION
bool into = n.dot(nl)>0; // Ray from outside going in?
double nc=1, nt=1.5, nnt=into?nc/nt:nt/nc, ddn=r.d.dot(nl), cos2t;
if ((cos2t=1-nnt*nnt*(1-ddn*ddn))<0){ // Total internal reflection
//return obj.e + f.mult(radiance(reflRay,depth,Xi));
r = reflRay;
continue;
}
Vec tdir = (r.d*nnt - n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm();
double a=nt-nc, b=nt+nc, R0=a*a/(b*b), c = 1-(into?-ddn:tdir.dot(n));
double Re=R0+(1-R0)*c*c*c*c*c,Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP=Tr/(1-P);
// return obj.e + f.mult(curand_uniform(Xi)<P ?
// radiance(reflRay, depth,Xi)*RP:
// radiance(Ray(x,tdir),depth,Xi)*TP);
if (curand_uniform(Xi)<P){
cf = cf*RP;
r = reflRay;
} else {
cf = cf*TP;
r = new_ray(x,tdir);
}
continue;
}
}
__global__ void calc_pixel(Vec *out, int samps) {
// Calculates a single pixel in the final image
// Returns a color vector that is later written to the final image.
curandState state;
int w=1024, h=768;
Ray cam = new_ray(new_vec(50,52,295.6), new_vec(0,-0.042612,-1).norm()); // cam pos, dir
Vec cx = new_vec(w*.5135/h), cy = (cx%cam.d).norm()*.5135;
int t = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(t, t, 0, &state);
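// Each thread seeds its own RNG with its global index (also used as the sequence number);
// the loop below then makes thread t shade four pixels, one in each quarter-height band
// of the 1024x768 image.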
for(int idx = 0; idx < 4; idx++){
int y = t/w + (idx*(h/4));
int x = t%w;
int i = (h-y-1) * w + x;
for (int sy = 0; sy < 2; sy++) { // 2x2 subpixel rows
for (int sx = 0; sx < 2; sx++) { // 2x2 subpixel cols
Vec r = new_vec();
for (int s = 0; s < samps; s++) {
double r1 = 2*curand_uniform(&state), dx=r1<1 ? sqrt(r1)-1: 1-sqrt(2-r1);
double r2 = 2*curand_uniform(&state), dy=r2<1 ? sqrt(r2)-1: 1-sqrt(2-r2);
Vec d = cx*( ( (sx+.5 + dx)/2 + x)/w - .5) +
cy*( ( (sy+.5 + dy)/2 + y)/h - .5) + cam.d;
r = r + linear_radiance(new_ray(cam.o+d.norm()*140,d.norm()),0, &state) * (1./samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
out[i] = out[i] + new_vec(clamp(r.x),clamp(r.y),clamp(r.z))*.25;
}
}
}
}
int main(int argc, char *argv[]) {
float BLOCK_SIZE = 512;
timer_start("Starting program."); //@@ start a timer
Sphere *spheres = (Sphere *)malloc(NUM_SPHERES * sizeof(Sphere));
spheres[0] = new_sphere(1e5, new_vec( 1e5+1,40.8,81.6), new_vec(),new_vec(.75,.25,.25),DIFF);//Left
spheres[1] = new_sphere(1e5, new_vec(-1e5+99,40.8,81.6), new_vec(),new_vec(.25,.25,.75),DIFF);//Rght
spheres[2] = new_sphere(1e5, new_vec(50,40.8, 1e5), new_vec(),new_vec(.75,.75,.75),DIFF);//Back
spheres[3] = new_sphere(1e5, new_vec(50,40.8,-1e5+170), new_vec(),new_vec(), DIFF);//Frnt
spheres[4] = new_sphere(1e5, new_vec(50, 1e5, 81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Botm
spheres[5] = new_sphere(1e5, new_vec(50,-1e5+81.6,81.6), new_vec(),new_vec(.75,.75,.75),DIFF);//Top
spheres[6] = new_sphere(16.5, new_vec(27,16.5,47), new_vec(),new_vec(1,1,1)*.999, SPEC);//Mirr
spheres[7] = new_sphere(16.5, new_vec(73,16.5,78), new_vec(),new_vec(1,1,1)*.999, REFR);//Glas
spheres[8] = new_sphere(600, new_vec(50,681.6-.27,81.6), new_vec(12,12,12), new_vec(), DIFF);//Lite
// Copy the spheres to constant memory
cudaMemcpyToSymbol(SPHERES, spheres, NUM_SPHERES * sizeof(Sphere));
int w=1024, h=768; // image dimensions
int samps = argc==2 ? atoi(argv[1])/4 : 250;
Vec *host_out = (Vec *)malloc(sizeof(Vec) * w * h);
Vec *device_out;
cudaMalloc((void **) &device_out, sizeof(Vec) * w * h);
printf("This is Chris's 1-D optimization.\n");
printf("Render starting!\nBlock size is %i\n", int(BLOCK_SIZE));
dim3 grid(ceil((w*h)/(4*BLOCK_SIZE)), 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
calc_pixel<<<grid, block>>>(device_out, samps);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaMemcpy(host_out, device_out, sizeof(Vec) * w * h, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
cudaFree(device_out);
float milliseconds;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Image rendered in %f milliseconds!\n", milliseconds);
FILE *f = fopen("image.ppm", "w"); // Write image to PPM file.
fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
for (int i=0; i<w*h; i++)
fprintf(f,"%d %d %d ", toInt(host_out[i].x), toInt(host_out[i].y), toInt(host_out[i].z));
fclose(f);
free(host_out);
free(spheres);
timer_stop();
return 0;
}
|
b827fbced20415eb1d312a6a66e3fa2e6e62ac0e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Conlain Kelly, Nic Olsen, Dan Negrut, Ruochun Zhang
// =============================================================================
#include "chrono_gpu/cuda/ChGpu_SMC_trimesh.cuh"
#include "chrono_gpu/cuda/ChGpu_SMC.cuh"
#include "chrono_gpu/physics/ChSystemGpuMesh_impl.h"
#include "chrono_gpu/utils/ChGpuUtilities.h"
#include <math_constants.h>
namespace chrono {
namespace gpu {
__host__ void ChSystemGpuMesh_impl::runTriangleBroadphase() {
METRICS_PRINTF("Resetting broadphase info!\n");
unsigned int numTriangles = meshSoup->nTrianglesInSoup;
unsigned int nblocks = (numTriangles + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK;
hipLaunchKernelGGL(( determineCountOfSDsTouchedByEachTriangle), dim3(nblocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0,
meshSoup, Triangle_NumSDsTouching.data(), gran_params, tri_params);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
// do prefix scan
size_t temp_storage_bytes = 0;
unsigned int* out_ptr = Triangle_SDsCompositeOffsets.data();
unsigned int* in_ptr = Triangle_NumSDsTouching.data();
// copy data into the tmp array
gpuErrchk(hipMemcpy(out_ptr, in_ptr, numTriangles * sizeof(unsigned int), hipMemcpyDeviceToDevice));
hipcub::DeviceScan::ExclusiveSum(NULL, temp_storage_bytes, in_ptr, out_ptr, numTriangles);
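// NOTE the call above passes NULL for the temp-storage pointer, so CUB only reports the
// required scratch size in temp_storage_bytes; the actual scan is the second call below.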
gpuErrchk(hipDeviceSynchronize());
// get pointer to device memory; this memory block will be used internally by CUB, for scratch area
void* d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
// Run exclusive prefix sum
hipcub::DeviceScan::ExclusiveSum(d_scratch_space, temp_storage_bytes, in_ptr, out_ptr, numTriangles);
gpuErrchk(hipDeviceSynchronize());
unsigned int numOfTriangleTouchingSD_instances; // total number of instances in which a triangle touches an SD
numOfTriangleTouchingSD_instances = out_ptr[numTriangles - 1] + in_ptr[numTriangles - 1];
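// (exclusive scan: the total equals the last offset plus the last per-triangle count)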
// resize, if need be, several helper vectors that live in managed memory
SDsTouchedByEachTriangle_composite_out.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
SDsTouchedByEachTriangle_composite.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
TriangleIDS_ByMultiplicity_out.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
TriangleIDS_ByMultiplicity.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
// sort key-value where the key is SD id, value is triangle ID in composite array
hipLaunchKernelGGL(( storeSDsTouchedByEachTriangle), dim3(nblocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0,
meshSoup, Triangle_NumSDsTouching.data(), Triangle_SDsCompositeOffsets.data(),
SDsTouchedByEachTriangle_composite.data(), TriangleIDS_ByMultiplicity.data(), gran_params, tri_params);
gpuErrchk(hipDeviceSynchronize());
unsigned int* d_keys_in = SDsTouchedByEachTriangle_composite.data();
unsigned int* d_keys_out = SDsTouchedByEachTriangle_composite_out.data();
unsigned int* d_values_in = TriangleIDS_ByMultiplicity.data();
unsigned int* d_values_out = TriangleIDS_ByMultiplicity_out.data();
// Run CUB sorting operation, key-value type.
// Key: the ID of the SD.
// Value: the ID of the triangle that touches the "Key" SD.
// The outcome of the sort operation will look like this:
// SDs: 23 23 23 89 89 89 89 107 107 107 etc.
// Triangle: 5 9 17 43 67 108 221 6 12 298 etc.
// First, determine temporary device storage requirements; pass null, CUB tells us what it needs
hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out,
numOfTriangleTouchingSD_instances);
gpuErrchk(hipDeviceSynchronize());
// get pointer to device memory; this memory block will be used internally by CUB
d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
hipcub::DeviceRadixSort::SortPairs(d_scratch_space, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in,
d_values_out, numOfTriangleTouchingSD_instances);
gpuErrchk(hipDeviceSynchronize());
// We started with SDs touching a triangle; we just flipped this through the key-value sort. That is, we now
// know the collection of triangles that touch each SD; SD by SD.
SD_trianglesInEachSD_composite.resize(TriangleIDS_ByMultiplicity_out.size());
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(SD_trianglesInEachSD_composite.data(), TriangleIDS_ByMultiplicity_out.data(),
numOfTriangleTouchingSD_instances * sizeof(unsigned int), hipMemcpyDeviceToDevice));
// The CUB encode operation below will tell us what SDs are actually touched by triangles, and how many triangles
// touch each SD.
//
// "d_in" is SDsTouchedByEachTriangle_composite_out; contains the IDs of the SDs that have triangles in them; if an
// SD is touched by "t" triangles, it'll show up "t" times in this array
unsigned int* d_in = d_keys_out;
// d_unique_out stores a list of *unique* SDs with the following property: each SD in this list has at least one
// triangle touching it. In terms of memory, this is pretty wasteful since it's unlikely that all SDs are touched by
// at least one triangle; perhaps revisit later.
unsigned int* d_unique_out =
(unsigned int*)stateOfSolver_resources.pDeviceMemoryScratchSpace(nSDs * sizeof(unsigned int));
// squatting on SD_TrianglesCompositeOffsets device vector; its size is nSDs. Works in tandem with d_unique_out.
// If d_unique_out[4]=72, d_counts_out[4] says how many triangles touch SD 72.
unsigned int* d_counts_out = SD_TrianglesCompositeOffsets.data();
// squatting on TriangleIDS_ByMultiplicity, which is not needed anymore. We're using only *one* entry in this array.
// Output value represents the number of SDs that have at least one triangle touching the SD
unsigned int* d_num_runs_out = Triangle_SDsCompositeOffsets.data();
// dry run, figure out the number of bytes that will be used in the actual run
hipcub::DeviceRunLengthEncode::Encode(NULL, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out,
numOfTriangleTouchingSD_instances);
gpuErrchk(hipDeviceSynchronize());
d_scratch_space = TriangleIDS_ByMultiplicity.data();
// Run the actual encoding operation
hipcub::DeviceRunLengthEncode::Encode(d_scratch_space, temp_storage_bytes, d_in, d_unique_out, d_counts_out,
d_num_runs_out, numOfTriangleTouchingSD_instances);
gpuErrchk(hipDeviceSynchronize());
// SD_numTrianglesTouching contains only zeros
// compute offsets in SD_trianglesInEachSD_composite and also counts for how many triangles touch each SD.
// Start by zeroing out, it's important since not all entries will be touched in
gpuErrchk(hipMemset(SD_numTrianglesTouching.data(), 0, nSDs * sizeof(unsigned int)));
nblocks = ((*d_num_runs_out) + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK;
if (nblocks > 0) {
hipLaunchKernelGGL(( finalizeSD_numTrianglesTouching), dim3(nblocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, d_unique_out, d_counts_out, d_num_runs_out,
SD_numTrianglesTouching.data());
gpuErrchk(hipDeviceSynchronize());
}
// Now assert that no SD holds more than the maximum allowed number of triangles
// If one does, exit gracefully
in_ptr = SD_numTrianglesTouching.data();
// Just borrow the first element of SD_TrianglesCompositeOffsets to store the max value
unsigned int* maxTriCount = SD_TrianglesCompositeOffsets.data();
hipcub::DeviceReduce::Max(NULL, temp_storage_bytes, in_ptr, maxTriCount, nSDs);
gpuErrchk(hipDeviceSynchronize());
d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
hipcub::DeviceReduce::Max(d_scratch_space, temp_storage_bytes, in_ptr, maxTriCount, nSDs);
gpuErrchk(hipDeviceSynchronize());
if (*maxTriCount > MAX_TRIANGLE_COUNT_PER_SD)
CHGPU_ERROR("ERROR! %u triangles are found in one of the SDs! The max allowance is %u.\n", *maxTriCount,
MAX_TRIANGLE_COUNT_PER_SD);
// Lastly, we need to do a CUB prefix scan to get the offsets in the big composite array
in_ptr = SD_numTrianglesTouching.data();
out_ptr = SD_TrianglesCompositeOffsets.data();
hipcub::DeviceScan::ExclusiveSum(NULL, temp_storage_bytes, in_ptr, out_ptr, nSDs);
gpuErrchk(hipDeviceSynchronize());
d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
// Run CUB exclusive prefix sum
hipcub::DeviceScan::ExclusiveSum(d_scratch_space, temp_storage_bytes, in_ptr, out_ptr, nSDs);
gpuErrchk(hipDeviceSynchronize());
}
__global__ void interactionGranMat_TriangleSoup_matBased(ChSystemGpuMesh_impl::TriangleSoupPtr d_triangleSoup,
ChSystemGpu_impl::GranSphereDataPtr sphere_data,
const unsigned int* SD_trianglesInEachSD_composite,
const unsigned int* SD_numTrianglesTouching,
const unsigned int* SD_TrianglesCompositeOffsets,
ChSystemGpu_impl::GranParamsPtr gran_params,
ChSystemGpuMesh_impl::MeshParamsPtr mesh_params,
unsigned int triangleFamilyHistmapOffset) {
__shared__ unsigned int triangleIDs[MAX_TRIANGLE_COUNT_PER_SD]; //!< global ID of the triangles touching this SD
__shared__ int3 sphere_pos_local[MAX_COUNT_OF_SPHERES_PER_SD]; //!< local coordinate of the sphere
__shared__ float3 sphere_vel[MAX_COUNT_OF_SPHERES_PER_SD];
// TODO figure out how we can do this better with no friction
__shared__ float3 omega[MAX_COUNT_OF_SPHERES_PER_SD];
__shared__ double3 node1[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 1st node of the triangle
__shared__ double3 node2[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 2nd node of the triangle
__shared__ double3 node3[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 3rd node of the triangle
// define an alias first
unsigned int thisSD = blockIdx.x;
if (SD_numTrianglesTouching[thisSD] == 0) {
return; // no triangle touches this block's SD
}
unsigned int spheresTouchingThisSD = sphere_data->SD_NumSpheresTouching[thisSD];
if (spheresTouchingThisSD == 0) {
return; // no sphere touches this block's SD
}
// Getting here means that there are both triangles and DEs in this SD.
unsigned int numSDTriangles = SD_numTrianglesTouching[thisSD];
unsigned int sphereIDLocal = threadIdx.x;
unsigned int sphereIDGlobal = NULL_CHGPU_ID;
// Bring in data from global into shmem. Only a subset of threads get to do this.
// Note that we're not using shared memory very heavily, so our bandwidth is pretty low
if (sphereIDLocal < spheresTouchingThisSD) {
size_t SD_composite_offset = sphere_data->SD_SphereCompositeOffsets[thisSD];
// TODO standardize this
size_t offset_in_composite_Array = SD_composite_offset + sphereIDLocal;
sphereIDGlobal = sphere_data->spheres_in_SD_composite[offset_in_composite_Array];
sphere_pos_local[sphereIDLocal] =
make_int3(sphere_data->sphere_local_pos_X[sphereIDGlobal], sphere_data->sphere_local_pos_Y[sphereIDGlobal],
sphere_data->sphere_local_pos_Z[sphereIDGlobal]);
unsigned int sphere_owner_SD = sphere_data->sphere_owner_SDs[sphereIDGlobal];
// if this SD doesn't own that sphere, add an offset to account
if (sphere_owner_SD != thisSD) {
sphere_pos_local[sphereIDLocal] =
sphere_pos_local[sphereIDLocal] + getOffsetFromSDs(thisSD, sphere_owner_SD, gran_params);
}
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
omega[sphereIDLocal] =
make_float3(sphere_data->sphere_Omega_X[sphereIDGlobal], sphere_data->sphere_Omega_Y[sphereIDGlobal],
sphere_data->sphere_Omega_Z[sphereIDGlobal]);
}
sphere_vel[sphereIDLocal] =
make_float3(sphere_data->pos_X_dt[sphereIDGlobal], sphere_data->pos_Y_dt[sphereIDGlobal],
sphere_data->pos_Z_dt[sphereIDGlobal]);
}
// Populate the shared memory with mesh triangle data
unsigned int tripsToCoverTriangles = (numSDTriangles + blockDim.x - 1) / blockDim.x;
unsigned int local_ID = threadIdx.x;
for (unsigned int triangTrip = 0; triangTrip < tripsToCoverTriangles; triangTrip++) {
if (local_ID < numSDTriangles) {
size_t SD_composite_offset = SD_TrianglesCompositeOffsets[thisSD];
if (SD_composite_offset == NULL_CHGPU_ID) {
ABORTABORTABORT("Invalid composite offset %lu for SD %u, touching %u triangles\n", NULL_CHGPU_ID,
thisSD, numSDTriangles);
}
size_t offset_in_composite_Array = SD_composite_offset + local_ID;
unsigned int globalID = SD_trianglesInEachSD_composite[offset_in_composite_Array];
triangleIDs[local_ID] = globalID;
// Read node positions from global memory into shared memory
// NOTE implicit cast from float to double here
unsigned int fam = d_triangleSoup->triangleFamily_ID[globalID];
node1[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node1[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node2[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node2[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node3[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node3[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
convert_pos_UU2SU<double3>(node1[local_ID], gran_params);
convert_pos_UU2SU<double3>(node2[local_ID], gran_params);
convert_pos_UU2SU<double3>(node3[local_ID], gran_params);
}
local_ID += blockDim.x;
}
__syncthreads(); // this call ensures data is in its place in shared memory
float3 sphere_force = {0.f, 0.f, 0.f};
float3 sphere_AngAcc = {0.f, 0.f, 0.f};
if (sphereIDLocal < spheresTouchingThisSD) {
// loop over each triangle in the SD and compute the force this sphere (thread) exerts on it
for (unsigned int triangleLocalID = 0; triangleLocalID < numSDTriangles; triangleLocalID++) {
/// we have a valid sphere and a valid triangle; check if in contact
float3 normal; // Unit normal from pt2 to pt1 (triangle contact point to sphere contact point)
float depth; // Negative in overlap
float3 pt1_float;
// Transform LRF to GRF
const unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
bool valid_contact = false;
// vector from center of mesh body to contact point, assume this can be held in a float
float3 fromCenter;
{
double3 pt1; // Contact point on triangle
// NOTE sphere_pos_local is relative to THIS SD, not its owner SD
double3 sphCntr =
int64_t3_to_double3(convertPosLocalToGlobal(thisSD, sphere_pos_local[sphereIDLocal], gran_params));
valid_contact = face_sphere_cd(node1[triangleLocalID], node2[triangleLocalID], node3[triangleLocalID],
sphCntr, gran_params->sphereRadius_SU, normal, depth, pt1);
valid_contact = valid_contact &&
SDTripletID(pointSDTriplet(pt1.x, pt1.y, pt1.z, gran_params), gran_params) == thisSD;
pt1_float = make_float3(pt1.x, pt1.y, pt1.z);
double3 meshCenter_double =
make_double3(mesh_params->fam_frame_narrow[fam].pos[0], mesh_params->fam_frame_narrow[fam].pos[1],
mesh_params->fam_frame_narrow[fam].pos[2]);
convert_pos_UU2SU<double3>(meshCenter_double, gran_params);
double3 fromCenter_double = pt1 - meshCenter_double;
fromCenter = make_float3(fromCenter_double.x, fromCenter_double.y, fromCenter_double.z);
}
// If there is a collision, add an impulse to the sphere
if (valid_contact) {
// TODO contact models
// Use the CD information to compute the force on the grElement
// normal points from triangle to sphere
float3 delta = -depth * normal;
// effective radius is just sphere radius -- assume meshes are locally flat (a safe assumption?)
// float hertz_force_factor = sqrt(abs(depth) / gran_params->sphereRadius_SU);
// helper variables
float sqrt_Rd = sqrt(abs(depth) * gran_params->sphereRadius_SU);
float Sn = 2. * mesh_params->E_eff_s2m_SU * sqrt_Rd;
float loge = (mesh_params->COR_s2m_SU < EPSILON) ? log(EPSILON) : log(mesh_params->COR_s2m_SU);
float beta = loge / sqrt(loge * loge + CUDART_PI_F * CUDART_PI_F);
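// beta = ln(e)/sqrt(ln(e)^2 + pi^2), the damping ratio derived from the coefficient of
// restitution e (clamped away from zero above)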
// effective mass = mass_mesh * mass_sphere / (m_mesh + mass_sphere)
float fam_mass_SU = d_triangleSoup->familyMass_SU[fam];
const float sphere_mass_SU = gran_params->sphere_mass_SU;
float m_eff = sphere_mass_SU * fam_mass_SU / (sphere_mass_SU + fam_mass_SU);
// stiffness and damping coefficient
float kn = (2.0 / 3.0) * Sn;
float gn = 2 * sqrt(5.0 / 6.0) * beta * sqrt(Sn * m_eff);
// relative velocity = v_sphere - v_mesh
float3 v_rel = sphere_vel[sphereIDLocal] - d_triangleSoup->vel[fam];
// assumes pos is the center of mass of the mesh
float3 meshCenter =
make_float3(mesh_params->fam_frame_broad[fam].pos[0], mesh_params->fam_frame_broad[fam].pos[1],
mesh_params->fam_frame_broad[fam].pos[2]);
convert_pos_UU2SU<float3>(meshCenter, gran_params);
// NOTE depth is negative and normal points from triangle to sphere center
float3 r = pt1_float + normal * (depth / 2) - meshCenter;
// Add angular velocity contribution from mesh
v_rel = v_rel - Cross(d_triangleSoup->omega[fam], r);
// add tangential components if they exist
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// Vector from the center of sphere to center of contact volume
float3 r_A = -(gran_params->sphereRadius_SU + depth / 2.f) * normal;
v_rel = v_rel + Cross(omega[sphereIDLocal], r_A);
}
// normal component of relative velocity
float projection = Dot(v_rel, normal);
// tangential component of relative velocity
float3 vrel_t = v_rel - projection * normal;
// normal force magnitude
float forceN_mag = -kn * depth + gn * projection;
float3 force_accum = forceN_mag * normal;
// Compute force updates for adhesion term, opposite the spring term
// NOTE ratio is wrt the weight of a sphere of mass 1
// NOTE the cancelation of two negatives
force_accum = force_accum + gran_params->sphere_mass_SU * mesh_params->adhesionAcc_s2m * delta / depth;
// tangential component
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// radius pointing from the contact point to the center of particle
float3 Rc = (gran_params->sphereRadius_SU + depth / 2.f) * normal;
float3 roll_ang_acc = computeRollingAngAcc(
sphere_data, gran_params, mesh_params->rolling_coeff_s2m_SU, mesh_params->spinning_coeff_s2m_SU,
force_accum, omega[sphereIDLocal], d_triangleSoup->omega[fam], Rc);
sphere_AngAcc = sphere_AngAcc + roll_ang_acc;
unsigned int BC_histmap_label = triangleFamilyHistmapOffset + fam;
// compute tangent force
float3 tangent_force = computeFrictionForces_matBased(
gran_params, sphere_data, sphereIDGlobal, BC_histmap_label,
mesh_params->static_friction_coeff_s2m, mesh_params->E_eff_s2m_SU, mesh_params->G_eff_s2m_SU,
sqrt_Rd, beta, force_accum, vrel_t, normal, m_eff);
float force_unit = gran_params->MASS_UNIT * gran_params->LENGTH_UNIT /
(gran_params->TIME_UNIT * gran_params->TIME_UNIT);
float velocity_unit = gran_params->LENGTH_UNIT / gran_params->TIME_UNIT;
force_accum = force_accum + tangent_force;
sphere_AngAcc =
sphere_AngAcc + Cross(-1.f * normal, tangent_force) / gran_params->sphereInertia_by_r;
}
// Use the CD information to compute the force and torque on the family of this triangle
sphere_force = sphere_force + force_accum;
// Force on the mesh is opposite the force on the sphere
float3 force_total = -1.f * force_accum;
float3 torque = Cross(fromCenter, force_total);
// TODO we could be much smarter about reducing this atomic write
unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 0, force_total.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 1, force_total.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 2, force_total.z);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 3, torque.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 4, torque.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 5, torque.z);
}
} // end of per-triangle loop
// write back sphere forces
atomicAdd(sphere_data->sphere_acc_X + sphereIDGlobal, sphere_force.x / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Y + sphereIDGlobal, sphere_force.y / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Z + sphereIDGlobal, sphere_force.z / gran_params->sphere_mass_SU);
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// write back torques for later
atomicAdd(sphere_data->sphere_ang_acc_X + sphereIDGlobal, sphere_AngAcc.x);
atomicAdd(sphere_data->sphere_ang_acc_Y + sphereIDGlobal, sphere_AngAcc.y);
atomicAdd(sphere_data->sphere_ang_acc_Z + sphereIDGlobal, sphere_AngAcc.z);
}
} // end sphere id check
} // end kernel
/// <summary>
/// Kernel accounts for the interaction between the granular material and the triangles making up the triangle soup
/// </summary>
/// <param name="d_triangleSoup">- information about triangle soup (in device mem.)</param>
/// <param name="sphere_data">- data structure containing pointers to granular-material related info</param>
/// <param name="SD_trianglesInEachSD_composite">- array saying which triangles touch an SD; has information for each
/// SD</param> <param name="SD_numTrianglesTouching">- number of triangles touching each SD</param> <param
/// name="SD_TrianglesCompositeOffsets">- offsets in the composite array for each SD; where each SD starts storing its
/// triangles</param> <param name="gran_params">- parameters associated with the granular material</param> <param
/// name="mesh_params">- parameters associated with the triangle soup</param> <param
/// name="triangleFamilyHistmapOffset">- offset in the array of friction history (?)</param> <returns></returns>
__global__ void interactionGranMat_TriangleSoup(ChSystemGpuMesh_impl::TriangleSoupPtr d_triangleSoup,
ChSystemGpu_impl::GranSphereDataPtr sphere_data,
const unsigned int* SD_trianglesInEachSD_composite,
const unsigned int* SD_numTrianglesTouching,
const unsigned int* SD_TrianglesCompositeOffsets,
ChSystemGpu_impl::GranParamsPtr gran_params,
ChSystemGpuMesh_impl::MeshParamsPtr mesh_params,
unsigned int triangleFamilyHistmapOffset) {
__shared__ unsigned int triangleIDs[MAX_TRIANGLE_COUNT_PER_SD]; //!< global ID of the triangles touching this SD
__shared__ int3 sphere_pos_local[MAX_COUNT_OF_SPHERES_PER_SD]; //!< local coordinate of the sphere
__shared__ float3 sphere_vel[MAX_COUNT_OF_SPHERES_PER_SD];
// TODO figure out how we can do this better with no friction
__shared__ float3 omega[MAX_COUNT_OF_SPHERES_PER_SD];
__shared__ double3 node1[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 1st node of the triangle
__shared__ double3 node2[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 2nd node of the triangle
__shared__ double3 node3[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 3rd node of the triangle
// define an alias first
unsigned int thisSD = blockIdx.x;
if (SD_numTrianglesTouching[thisSD] == 0) {
return; // no triangle touches this block's SD
}
unsigned int spheresTouchingThisSD = sphere_data->SD_NumSpheresTouching[thisSD];
if (spheresTouchingThisSD == 0) {
return; // no sphere touches this block's SD
}
// Getting here means that there are both triangles and DEs in this SD.
unsigned int numSDTriangles = SD_numTrianglesTouching[thisSD];
unsigned int sphereIDLocal = threadIdx.x;
unsigned int sphereIDGlobal = NULL_CHGPU_ID;
// Bring in data from global into shmem. Only a subset of threads get to do this.
// Note that we're not using shared memory very heavily, so our bandwidth is pretty low
if (sphereIDLocal < spheresTouchingThisSD) {
size_t SD_composite_offset = sphere_data->SD_SphereCompositeOffsets[thisSD];
// TODO standardize this
size_t offset_in_composite_Array = SD_composite_offset + sphereIDLocal;
sphereIDGlobal = sphere_data->spheres_in_SD_composite[offset_in_composite_Array];
sphere_pos_local[sphereIDLocal] =
make_int3(sphere_data->sphere_local_pos_X[sphereIDGlobal], sphere_data->sphere_local_pos_Y[sphereIDGlobal],
sphere_data->sphere_local_pos_Z[sphereIDGlobal]);
unsigned int sphere_owner_SD = sphere_data->sphere_owner_SDs[sphereIDGlobal];
// if this SD doesn't own that sphere, add an offset to account
if (sphere_owner_SD != thisSD) {
sphere_pos_local[sphereIDLocal] =
sphere_pos_local[sphereIDLocal] + getOffsetFromSDs(thisSD, sphere_owner_SD, gran_params);
}
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
omega[sphereIDLocal] =
make_float3(sphere_data->sphere_Omega_X[sphereIDGlobal], sphere_data->sphere_Omega_Y[sphereIDGlobal],
sphere_data->sphere_Omega_Z[sphereIDGlobal]);
}
sphere_vel[sphereIDLocal] =
make_float3(sphere_data->pos_X_dt[sphereIDGlobal], sphere_data->pos_Y_dt[sphereIDGlobal],
sphere_data->pos_Z_dt[sphereIDGlobal]);
}
// Populate the shared memory with mesh triangle data
unsigned int tripsToCoverTriangles = (numSDTriangles + blockDim.x - 1) / blockDim.x;
unsigned int local_ID = threadIdx.x;
for (unsigned int triangTrip = 0; triangTrip < tripsToCoverTriangles; triangTrip++) {
if (local_ID < numSDTriangles) {
size_t SD_composite_offset = SD_TrianglesCompositeOffsets[thisSD];
if (SD_composite_offset == NULL_CHGPU_ID) {
ABORTABORTABORT("Invalid composite offset %lu for SD %u, touching %u triangles\n", NULL_CHGPU_ID,
thisSD, numSDTriangles);
}
size_t offset_in_composite_Array = SD_composite_offset + local_ID;
unsigned int globalID = SD_trianglesInEachSD_composite[offset_in_composite_Array];
triangleIDs[local_ID] = globalID;
// Read node positions from global memory into shared memory
// NOTE implicit cast from float to double here
unsigned int fam = d_triangleSoup->triangleFamily_ID[globalID];
node1[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node1[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node2[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node2[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node3[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node3[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
convert_pos_UU2SU<double3>(node1[local_ID], gran_params);
convert_pos_UU2SU<double3>(node2[local_ID], gran_params);
convert_pos_UU2SU<double3>(node3[local_ID], gran_params);
}
local_ID += blockDim.x;
}
__syncthreads(); // this call ensures data is in its place in shared memory
float3 sphere_force = {0.f, 0.f, 0.f};
float3 sphere_AngAcc = {0.f, 0.f, 0.f};
if (sphereIDLocal < spheresTouchingThisSD) {
// loop over each triangle in the SD and compute the force this sphere (thread) exerts on it
for (unsigned int triangleLocalID = 0; triangleLocalID < numSDTriangles; triangleLocalID++) {
/// we have a valid sphere and a valid triangle; check if in contact
float3 normal; // Unit normal from pt2 to pt1 (triangle contact point to sphere contact point)
float depth; // Negative in overlap
float3 pt1_float;
// Transform LRF to GRF
const unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
bool valid_contact = false;
// vector from center of mesh body to contact point, assume this can be held in a float
float3 fromCenter;
{
double3 pt1; // Contact point on triangle
// NOTE sphere_pos_local is relative to THIS SD, not its owner SD
double3 sphCntr =
int64_t3_to_double3(convertPosLocalToGlobal(thisSD, sphere_pos_local[sphereIDLocal], gran_params));
valid_contact = face_sphere_cd(node1[triangleLocalID], node2[triangleLocalID], node3[triangleLocalID],
sphCntr, gran_params->sphereRadius_SU, normal, depth, pt1);
valid_contact = valid_contact &&
SDTripletID(pointSDTriplet(pt1.x, pt1.y, pt1.z, gran_params), gran_params) == thisSD;
pt1_float = make_float3(pt1.x, pt1.y, pt1.z);
double3 meshCenter_double =
make_double3(mesh_params->fam_frame_narrow[fam].pos[0], mesh_params->fam_frame_narrow[fam].pos[1],
mesh_params->fam_frame_narrow[fam].pos[2]);
convert_pos_UU2SU<double3>(meshCenter_double, gran_params);
double3 fromCenter_double = pt1 - meshCenter_double;
fromCenter = make_float3(fromCenter_double.x, fromCenter_double.y, fromCenter_double.z);
}
// If there is a collision, add an impulse to the sphere
if (valid_contact) {
// TODO contact models
// Use the CD information to compute the force on the grElement
float3 delta = -depth * normal;
// effective radius is just sphere radius -- assume meshes are locally flat (a safe assumption?)
float hertz_force_factor = sqrt(abs(depth) / gran_params->sphereRadius_SU);
float3 force_accum = hertz_force_factor * mesh_params->K_n_s2m_SU * delta;
// Compute force updates for adhesion term, opposite the spring term
// NOTE ratio is wrt the weight of a sphere of mass 1
// NOTE the cancelation of two negatives
force_accum = force_accum + gran_params->sphere_mass_SU * mesh_params->adhesionAcc_s2m * delta / depth;
// Velocity difference; a single coalesced access here is better than fragmented accesses inside the branches below
float3 v_rel = sphere_vel[sphereIDLocal] - d_triangleSoup->vel[fam];
// TODO assumes pos is the center of mass of the mesh
// TODO can this be float?
float3 meshCenter =
make_float3(mesh_params->fam_frame_broad[fam].pos[0], mesh_params->fam_frame_broad[fam].pos[1],
mesh_params->fam_frame_broad[fam].pos[2]);
convert_pos_UU2SU<float3>(meshCenter, gran_params);
// NOTE depth is negative and normal points from triangle to sphere center
float3 r = pt1_float + normal * (depth / 2) - meshCenter;
// Add angular velocity contribution from mesh
v_rel = v_rel - Cross(d_triangleSoup->omega[fam], r);
// add tangential components if they exist
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// Vector from the center of sphere to center of contact volume
float3 r_A = -(gran_params->sphereRadius_SU + depth / 2.f) * normal;
v_rel = v_rel + Cross(omega[sphereIDLocal], r_A);
}
// Force accumulator on sphere for this sphere-triangle collision
// Compute force updates for normal spring term
// Compute force updates for damping term
// NOTE assumes sphere mass of 1
float fam_mass_SU = d_triangleSoup->familyMass_SU[fam];
const float sphere_mass_SU = gran_params->sphere_mass_SU;
float m_eff = sphere_mass_SU * fam_mass_SU / (sphere_mass_SU + fam_mass_SU);
float3 vrel_n = Dot(v_rel, normal) * normal;
v_rel = v_rel - vrel_n; // v_rel is now tangential relative velocity
// Add normal damping term
force_accum = force_accum - hertz_force_factor * mesh_params->Gamma_n_s2m_SU * m_eff * vrel_n;
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// radius pointing from the contact point to the center of particle
float3 Rc = (gran_params->sphereRadius_SU + depth / 2.f) * normal;
float3 roll_ang_acc = computeRollingAngAcc(
sphere_data, gran_params, mesh_params->rolling_coeff_s2m_SU, mesh_params->spinning_coeff_s2m_SU,
force_accum, omega[sphereIDLocal], d_triangleSoup->omega[fam], Rc);
sphere_AngAcc = sphere_AngAcc + roll_ang_acc;
unsigned int BC_histmap_label = triangleFamilyHistmapOffset + fam;
// compute tangent force
float3 tangent_force = computeFrictionForces(
gran_params, sphere_data, sphereIDGlobal, BC_histmap_label,
mesh_params->static_friction_coeff_s2m, mesh_params->K_t_s2m_SU, mesh_params->Gamma_t_s2m_SU,
hertz_force_factor, m_eff, force_accum, v_rel, normal);
float force_unit = gran_params->MASS_UNIT * gran_params->LENGTH_UNIT /
(gran_params->TIME_UNIT * gran_params->TIME_UNIT);
float velocity_unit = gran_params->LENGTH_UNIT / gran_params->TIME_UNIT;
force_accum = force_accum + tangent_force;
sphere_AngAcc =
sphere_AngAcc + Cross(-1.f * normal, tangent_force) / gran_params->sphereInertia_by_r;
}
// Use the CD information to compute the force and torque on the family of this triangle
sphere_force = sphere_force + force_accum;
// Force on the mesh is opposite the force on the sphere
float3 force_total = -1.f * force_accum;
float3 torque = Cross(fromCenter, force_total);
// TODO we could be much smarter about reducing this atomic write
unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 0, force_total.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 1, force_total.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 2, force_total.z);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 3, torque.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 4, torque.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 5, torque.z);
}
} // end of per-triangle loop
// write back sphere forces
atomicAdd(sphere_data->sphere_acc_X + sphereIDGlobal, sphere_force.x / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Y + sphereIDGlobal, sphere_force.y / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Z + sphereIDGlobal, sphere_force.z / gran_params->sphere_mass_SU);
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// write back torques for later
atomicAdd(sphere_data->sphere_ang_acc_X + sphereIDGlobal, sphere_AngAcc.x);
atomicAdd(sphere_data->sphere_ang_acc_Y + sphereIDGlobal, sphere_AngAcc.y);
atomicAdd(sphere_data->sphere_ang_acc_Z + sphereIDGlobal, sphere_AngAcc.z);
}
} // end sphere id check
} // end kernel
__host__ double ChSystemGpuMesh_impl::AdvanceSimulation(float duration) {
// Figure out the number of blocks that need to be launched to cover the box
unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK;
// Settling simulation loop.
float duration_SU = (float)(duration / TIME_SU2UU);
unsigned int nsteps = (unsigned int)std::round(duration_SU / stepSize_SU);
packSphereDataPointers();
// hipMemAdvise(gran_params, sizeof(*gran_params), hipMemAdviseSetReadMostly, dev_ID);
METRICS_PRINTF("advancing by %f at timestep %f, %u timesteps at approx user timestep %f\n", duration_SU,
stepSize_SU, nsteps, duration / nsteps);
METRICS_PRINTF("Starting Main Simulation loop!\n");
float time_elapsed_SU = 0.f; // time elapsed in this call (SU)
// Run the simulation, there are aggressive synchronizations because we want to have no race conditions
for (; time_elapsed_SU < stepSize_SU * nsteps; time_elapsed_SU += stepSize_SU) {
updateBCPositions();
runSphereBroadphase();
resetSphereAccelerations();
resetBCForces();
if (meshSoup->nTrianglesInSoup != 0 && mesh_collision_enabled) {
gpuErrchk(
hipMemset(meshSoup->generalizedForcesPerFamily, 0, 6 * meshSoup->numTriangleFamilies * sizeof(float)));
}
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
if (meshSoup->nTrianglesInSoup != 0 && mesh_collision_enabled) {
runTriangleBroadphase();
}
METRICS_PRINTF("Starting computeSphereForces!\n");
if (gran_params->friction_mode == CHGPU_FRICTION_MODE::FRICTIONLESS) {
// Compute sphere-sphere forces
if (gran_params->use_mat_based == true) {
METRICS_PRINTF("use material based model\n");
hipLaunchKernelGGL(( computeSphereForces_frictionless_matBased), dim3(nSDs), dim3(MAX_COUNT_OF_SPHERES_PER_SD), 0, 0,
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size());
} else {
METRICS_PRINTF("use user defined model\n");
hipLaunchKernelGGL(( computeSphereForces_frictionless), dim3(nSDs), dim3(MAX_COUNT_OF_SPHERES_PER_SD), 0, 0,
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size());
}
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
// frictional contact
else if (gran_params->friction_mode == CHGPU_FRICTION_MODE::SINGLE_STEP ||
gran_params->friction_mode == CHGPU_FRICTION_MODE::MULTI_STEP) {
// figure out who is contacting
hipLaunchKernelGGL(( determineContactPairs), dim3(nSDs), dim3(MAX_COUNT_OF_SPHERES_PER_SD), 0, 0, sphere_data, gran_params);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
METRICS_PRINTF("Frictional case.\n");
if (gran_params->use_mat_based == true) {
METRICS_PRINTF("compute sphere-sphere and sphere-bc mat based\n");
hipLaunchKernelGGL(( computeSphereContactForces_matBased), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0,
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size(), nSpheres);
} else {
METRICS_PRINTF("compute sphere-sphere and sphere-bc user defined\n");
hipLaunchKernelGGL(( computeSphereContactForces), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0,
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size(), nSpheres);
}
}
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
if (meshSoup->numTriangleFamilies != 0 && mesh_collision_enabled) {
// TODO please do not use a template here
// triangle labels come after BC labels numerically
unsigned int triangleFamilyHistmapOffset =
gran_params->nSpheres + 1 + (unsigned int)BC_params_list_SU.size() + 1;
// compute sphere-triangle forces
if (tri_params->use_mat_based == true) {
hipLaunchKernelGGL(( interactionGranMat_TriangleSoup_matBased), dim3(nSDs), dim3(MAX_COUNT_OF_SPHERES_PER_SD), 0, 0,
meshSoup, sphere_data, SD_trianglesInEachSD_composite.data(), SD_numTrianglesTouching.data(),
SD_TrianglesCompositeOffsets.data(), gran_params, tri_params, triangleFamilyHistmapOffset);
} else {
// // printf("compute sphere-mesh user defined\n");
hipLaunchKernelGGL(( interactionGranMat_TriangleSoup), dim3(nSDs), dim3(MAX_COUNT_OF_SPHERES_PER_SD), 0, 0,
meshSoup, sphere_data, SD_trianglesInEachSD_composite.data(), SD_numTrianglesTouching.data(),
SD_TrianglesCompositeOffsets.data(), gran_params, tri_params, triangleFamilyHistmapOffset);
}
}
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
METRICS_PRINTF("Starting integrateSpheres!\n");
hipLaunchKernelGGL(( integrateSpheres), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, stepSize_SU, sphere_data, nSpheres, gran_params);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
if (gran_params->friction_mode != CHGPU_FRICTION_MODE::FRICTIONLESS) {
const unsigned int nThreadsUpdateHist = 2 * CUDA_THREADS_PER_BLOCK;
unsigned int fricMapSize = nSpheres * MAX_SPHERES_TOUCHED_BY_SPHERE;
unsigned int nBlocksFricHistoryPostProcess = (fricMapSize + nThreadsUpdateHist - 1) / nThreadsUpdateHist;
hipLaunchKernelGGL(( updateFrictionData), dim3(nBlocksFricHistoryPostProcess), dim3(nThreadsUpdateHist), 0, 0, fricMapSize, sphere_data,
gran_params);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( updateAngVels), dim3(nBlocks), dim3(CUDA_THREADS_PER_BLOCK), 0, 0, stepSize_SU, sphere_data, nSpheres, gran_params);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
elapsedSimTime += (float)(stepSize_SU * TIME_SU2UU); // Advance current time
}
return time_elapsed_SU * TIME_SU2UU; // return elapsed UU time
}
} // namespace gpu
} // namespace chrono
|
b827fbced20415eb1d312a6a66e3fa2e6e62ac0e.cu
|
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Conlain Kelly, Nic Olsen, Dan Negrut, Ruochun Zhang
// =============================================================================
#include "chrono_gpu/cuda/ChGpu_SMC_trimesh.cuh"
#include "chrono_gpu/cuda/ChGpu_SMC.cuh"
#include "chrono_gpu/physics/ChSystemGpuMesh_impl.h"
#include "chrono_gpu/utils/ChGpuUtilities.h"
#include <math_constants.h>
namespace chrono {
namespace gpu {
__host__ void ChSystemGpuMesh_impl::runTriangleBroadphase() {
METRICS_PRINTF("Resetting broadphase info!\n");
unsigned int numTriangles = meshSoup->nTrianglesInSoup;
unsigned int nblocks = (numTriangles + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK;
determineCountOfSDsTouchedByEachTriangle<<<nblocks, CUDA_THREADS_PER_BLOCK>>>(
meshSoup, Triangle_NumSDsTouching.data(), gran_params, tri_params);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
// do prefix scan
size_t temp_storage_bytes = 0;
unsigned int* out_ptr = Triangle_SDsCompositeOffsets.data();
unsigned int* in_ptr = Triangle_NumSDsTouching.data();
// copy data into the tmp array
gpuErrchk(cudaMemcpy(out_ptr, in_ptr, numTriangles * sizeof(unsigned int), cudaMemcpyDeviceToDevice));
cub::DeviceScan::ExclusiveSum(NULL, temp_storage_bytes, in_ptr, out_ptr, numTriangles);
gpuErrchk(cudaDeviceSynchronize());
// get pointer to device memory; this memory block will be used internally by CUB, for scratch area
void* d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
// Run exclusive prefix sum
cub::DeviceScan::ExclusiveSum(d_scratch_space, temp_storage_bytes, in_ptr, out_ptr, numTriangles);
gpuErrchk(cudaDeviceSynchronize());
unsigned int numOfTriangleTouchingSD_instances; // total number of instances in which a triangle touches an SD
numOfTriangleTouchingSD_instances = out_ptr[numTriangles - 1] + in_ptr[numTriangles - 1];
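    // Hypothetical illustration (values not from the code): if Triangle_NumSDsTouching
    // were [2, 1, 3], the exclusive scan gives offsets [0, 2, 3] and the total number
    // of (triangle, SD) instances is 3 + 3 = 6, i.e. the last offset plus the last count.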
    // resize, if need be, several helper vectors that live in managed memory
SDsTouchedByEachTriangle_composite_out.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
SDsTouchedByEachTriangle_composite.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
TriangleIDS_ByMultiplicity_out.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
TriangleIDS_ByMultiplicity.resize(numOfTriangleTouchingSD_instances, NULL_CHGPU_ID);
// sort key-value where the key is SD id, value is triangle ID in composite array
storeSDsTouchedByEachTriangle<<<nblocks, CUDA_THREADS_PER_BLOCK>>>(
meshSoup, Triangle_NumSDsTouching.data(), Triangle_SDsCompositeOffsets.data(),
SDsTouchedByEachTriangle_composite.data(), TriangleIDS_ByMultiplicity.data(), gran_params, tri_params);
gpuErrchk(cudaDeviceSynchronize());
unsigned int* d_keys_in = SDsTouchedByEachTriangle_composite.data();
unsigned int* d_keys_out = SDsTouchedByEachTriangle_composite_out.data();
unsigned int* d_values_in = TriangleIDS_ByMultiplicity.data();
unsigned int* d_values_out = TriangleIDS_ByMultiplicity_out.data();
// Run CUB sorting operation, key-value type.
// Key: the ID of the SD.
// Value: the ID of the triangle that touches the "Key" SD.
// The outcome of the sort operation will look like this:
// SDs: 23 23 23 89 89 89 89 107 107 107 etc.
// Triangle: 5 9 17 43 67 108 221 6 12 298 etc.
// First, determine temporary device storage requirements; pass null, CUB tells us what it needs
cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out,
numOfTriangleTouchingSD_instances);
gpuErrchk(cudaDeviceSynchronize());
// get pointer to device memory; this memory block will be used internally by CUB
d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
cub::DeviceRadixSort::SortPairs(d_scratch_space, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in,
d_values_out, numOfTriangleTouchingSD_instances);
gpuErrchk(cudaDeviceSynchronize());
// We started with SDs touching a triangle; we just flipped this through the key-value sort. That is, we now
// know the collection of triangles that touch each SD; SD by SD.
SD_trianglesInEachSD_composite.resize(TriangleIDS_ByMultiplicity_out.size());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(SD_trianglesInEachSD_composite.data(), TriangleIDS_ByMultiplicity_out.data(),
numOfTriangleTouchingSD_instances * sizeof(unsigned int), cudaMemcpyDeviceToDevice));
// The CUB encode operation below will tell us what SDs are actually touched by triangles, and how many triangles
// touch each SD.
//
// "d_in" is SDsTouchedByEachTriangle_composite_out; contains the IDs of the SDs that have triangles in them; if an
// SD is touched by "t" triangles, it'll show up "t" times in this array
unsigned int* d_in = d_keys_out;
// d_unique_out stores a list of *unique* SDs with the following property: each SD in this list has at least one
    // triangle touching it. In terms of memory, this is pretty wasteful since it's unlikely that all SDs are touched by
// at least one triangle; perhaps revisit later.
unsigned int* d_unique_out =
(unsigned int*)stateOfSolver_resources.pDeviceMemoryScratchSpace(nSDs * sizeof(unsigned int));
// squatting on SD_TrianglesCompositeOffsets device vector; its size is nSDs. Works in tandem with d_unique_out.
// If d_unique_out[4]=72, d_counts_out[4] says how many triangles touch SD 72.
unsigned int* d_counts_out = SD_TrianglesCompositeOffsets.data();
    // squatting on Triangle_SDsCompositeOffsets, which is not needed anymore; we use only *one* entry of this array.
    // Output value represents the number of SDs that have at least one triangle touching the SD
unsigned int* d_num_runs_out = Triangle_SDsCompositeOffsets.data();
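    // Hypothetical illustration of the encode (values not from the code): with
    // d_in = [23, 23, 23, 89, 89, 89, 89, 107, 107, 107] it would produce
    // d_unique_out = [23, 89, 107], d_counts_out = [3, 4, 3], and *d_num_runs_out = 3.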
// dry run, figure out the number of bytes that will be used in the actual run
cub::DeviceRunLengthEncode::Encode(NULL, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out,
numOfTriangleTouchingSD_instances);
gpuErrchk(cudaDeviceSynchronize());
d_scratch_space = TriangleIDS_ByMultiplicity.data();
// Run the actual encoding operation
cub::DeviceRunLengthEncode::Encode(d_scratch_space, temp_storage_bytes, d_in, d_unique_out, d_counts_out,
d_num_runs_out, numOfTriangleTouchingSD_instances);
gpuErrchk(cudaDeviceSynchronize());
// SD_numTrianglesTouching contains only zeros
// compute offsets in SD_trianglesInEachSD_composite and also counts for how many triangles touch each SD.
    // Start by zeroing out; this is important since not all entries will be touched below
gpuErrchk(cudaMemset(SD_numTrianglesTouching.data(), 0, nSDs * sizeof(unsigned int)));
nblocks = ((*d_num_runs_out) + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK;
if (nblocks > 0) {
finalizeSD_numTrianglesTouching<<<nblocks, CUDA_THREADS_PER_BLOCK>>>(d_unique_out, d_counts_out, d_num_runs_out,
SD_numTrianglesTouching.data());
gpuErrchk(cudaDeviceSynchronize());
}
// Now assert that no SD has over max amount of triangles
// If there is one, exit graciously
in_ptr = SD_numTrianglesTouching.data();
// Just borrow the first element of SD_TrianglesCompositeOffsets to store the max value
unsigned int* maxTriCount = SD_TrianglesCompositeOffsets.data();
cub::DeviceReduce::Max(NULL, temp_storage_bytes, in_ptr, maxTriCount, nSDs);
gpuErrchk(cudaDeviceSynchronize());
d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
cub::DeviceReduce::Max(d_scratch_space, temp_storage_bytes, in_ptr, maxTriCount, nSDs);
gpuErrchk(cudaDeviceSynchronize());
if (*maxTriCount > MAX_TRIANGLE_COUNT_PER_SD)
CHGPU_ERROR("ERROR! %u triangles are found in one of the SDs! The max allowance is %u.\n", *maxTriCount,
MAX_TRIANGLE_COUNT_PER_SD);
// Lastly, we need to do a CUB prefix scan to get the offsets in the big composite array
in_ptr = SD_numTrianglesTouching.data();
out_ptr = SD_TrianglesCompositeOffsets.data();
cub::DeviceScan::ExclusiveSum(NULL, temp_storage_bytes, in_ptr, out_ptr, nSDs);
gpuErrchk(cudaDeviceSynchronize());
d_scratch_space = (void*)stateOfSolver_resources.pDeviceMemoryScratchSpace(temp_storage_bytes);
// Run CUB exclusive prefix sum
cub::DeviceScan::ExclusiveSum(d_scratch_space, temp_storage_bytes, in_ptr, out_ptr, nSDs);
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void interactionGranMat_TriangleSoup_matBased(ChSystemGpuMesh_impl::TriangleSoupPtr d_triangleSoup,
ChSystemGpu_impl::GranSphereDataPtr sphere_data,
const unsigned int* SD_trianglesInEachSD_composite,
const unsigned int* SD_numTrianglesTouching,
const unsigned int* SD_TrianglesCompositeOffsets,
ChSystemGpu_impl::GranParamsPtr gran_params,
ChSystemGpuMesh_impl::MeshParamsPtr mesh_params,
unsigned int triangleFamilyHistmapOffset) {
__shared__ unsigned int triangleIDs[MAX_TRIANGLE_COUNT_PER_SD]; //!< global ID of the triangles touching this SD
__shared__ int3 sphere_pos_local[MAX_COUNT_OF_SPHERES_PER_SD]; //!< local coordinate of the sphere
__shared__ float3 sphere_vel[MAX_COUNT_OF_SPHERES_PER_SD];
// TODO figure out how we can do this better with no friction
__shared__ float3 omega[MAX_COUNT_OF_SPHERES_PER_SD];
__shared__ double3 node1[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 1st node of the triangle
__shared__ double3 node2[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 2nd node of the triangle
__shared__ double3 node3[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 3rd node of the triangle
// define an alias first
unsigned int thisSD = blockIdx.x;
if (SD_numTrianglesTouching[thisSD] == 0) {
return; // no triangle touches this block's SD
}
unsigned int spheresTouchingThisSD = sphere_data->SD_NumSpheresTouching[thisSD];
if (spheresTouchingThisSD == 0) {
return; // no sphere touches this block's SD
}
// Getting here means that there are both triangles and DEs in this SD.
unsigned int numSDTriangles = SD_numTrianglesTouching[thisSD];
unsigned int sphereIDLocal = threadIdx.x;
unsigned int sphereIDGlobal = NULL_CHGPU_ID;
// Bring in data from global into shmem. Only a subset of threads get to do this.
// Note that we're not using shared memory very heavily, so our bandwidth is pretty low
if (sphereIDLocal < spheresTouchingThisSD) {
size_t SD_composite_offset = sphere_data->SD_SphereCompositeOffsets[thisSD];
// TODO standardize this
size_t offset_in_composite_Array = SD_composite_offset + sphereIDLocal;
sphereIDGlobal = sphere_data->spheres_in_SD_composite[offset_in_composite_Array];
sphere_pos_local[sphereIDLocal] =
make_int3(sphere_data->sphere_local_pos_X[sphereIDGlobal], sphere_data->sphere_local_pos_Y[sphereIDGlobal],
sphere_data->sphere_local_pos_Z[sphereIDGlobal]);
unsigned int sphere_owner_SD = sphere_data->sphere_owner_SDs[sphereIDGlobal];
// if this SD doesn't own that sphere, add an offset to account
if (sphere_owner_SD != thisSD) {
sphere_pos_local[sphereIDLocal] =
sphere_pos_local[sphereIDLocal] + getOffsetFromSDs(thisSD, sphere_owner_SD, gran_params);
}
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
omega[sphereIDLocal] =
make_float3(sphere_data->sphere_Omega_X[sphereIDGlobal], sphere_data->sphere_Omega_Y[sphereIDGlobal],
sphere_data->sphere_Omega_Z[sphereIDGlobal]);
}
sphere_vel[sphereIDLocal] =
make_float3(sphere_data->pos_X_dt[sphereIDGlobal], sphere_data->pos_Y_dt[sphereIDGlobal],
sphere_data->pos_Z_dt[sphereIDGlobal]);
}
// Populate the shared memory with mesh triangle data
unsigned int tripsToCoverTriangles = (numSDTriangles + blockDim.x - 1) / blockDim.x;
unsigned int local_ID = threadIdx.x;
for (unsigned int triangTrip = 0; triangTrip < tripsToCoverTriangles; triangTrip++) {
if (local_ID < numSDTriangles) {
size_t SD_composite_offset = SD_TrianglesCompositeOffsets[thisSD];
if (SD_composite_offset == NULL_CHGPU_ID) {
ABORTABORTABORT("Invalid composite offset %lu for SD %u, touching %u triangles\n", NULL_CHGPU_ID,
thisSD, numSDTriangles);
}
size_t offset_in_composite_Array = SD_composite_offset + local_ID;
unsigned int globalID = SD_trianglesInEachSD_composite[offset_in_composite_Array];
triangleIDs[local_ID] = globalID;
// Read node positions from global memory into shared memory
// NOTE implicit cast from float to double here
unsigned int fam = d_triangleSoup->triangleFamily_ID[globalID];
node1[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node1[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node2[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node2[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node3[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node3[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
convert_pos_UU2SU<double3>(node1[local_ID], gran_params);
convert_pos_UU2SU<double3>(node2[local_ID], gran_params);
convert_pos_UU2SU<double3>(node3[local_ID], gran_params);
}
local_ID += blockDim.x;
}
__syncthreads(); // this call ensures data is in its place in shared memory
float3 sphere_force = {0.f, 0.f, 0.f};
float3 sphere_AngAcc = {0.f, 0.f, 0.f};
if (sphereIDLocal < spheresTouchingThisSD) {
// loop over each triangle in the SD and compute the force this sphere (thread) exerts on it
for (unsigned int triangleLocalID = 0; triangleLocalID < numSDTriangles; triangleLocalID++) {
            /// we have a valid sphere and a valid triangle; check if in contact
float3 normal; // Unit normal from pt2 to pt1 (triangle contact point to sphere contact point)
float depth; // Negative in overlap
float3 pt1_float;
// Transform LRF to GRF
const unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
bool valid_contact = false;
// vector from center of mesh body to contact point, assume this can be held in a float
float3 fromCenter;
{
double3 pt1; // Contact point on triangle
// NOTE sphere_pos_local is relative to THIS SD, not its owner SD
double3 sphCntr =
int64_t3_to_double3(convertPosLocalToGlobal(thisSD, sphere_pos_local[sphereIDLocal], gran_params));
valid_contact = face_sphere_cd(node1[triangleLocalID], node2[triangleLocalID], node3[triangleLocalID],
sphCntr, gran_params->sphereRadius_SU, normal, depth, pt1);
valid_contact = valid_contact &&
SDTripletID(pointSDTriplet(pt1.x, pt1.y, pt1.z, gran_params), gran_params) == thisSD;
pt1_float = make_float3(pt1.x, pt1.y, pt1.z);
double3 meshCenter_double =
make_double3(mesh_params->fam_frame_narrow[fam].pos[0], mesh_params->fam_frame_narrow[fam].pos[1],
mesh_params->fam_frame_narrow[fam].pos[2]);
convert_pos_UU2SU<double3>(meshCenter_double, gran_params);
double3 fromCenter_double = pt1 - meshCenter_double;
fromCenter = make_float3(fromCenter_double.x, fromCenter_double.y, fromCenter_double.z);
}
// If there is a collision, add an impulse to the sphere
if (valid_contact) {
// TODO contact models
// Use the CD information to compute the force on the grElement
// normal points from triangle to sphere
float3 delta = -depth * normal;
// effective radius is just sphere radius -- assume meshes are locally flat (a safe assumption?)
// float hertz_force_factor = sqrt(abs(depth) / gran_params->sphereRadius_SU);
// helper variables
float sqrt_Rd = sqrt(abs(depth) * gran_params->sphereRadius_SU);
float Sn = 2. * mesh_params->E_eff_s2m_SU * sqrt_Rd;
float loge = (mesh_params->COR_s2m_SU < EPSILON) ? log(EPSILON) : log(mesh_params->COR_s2m_SU);
float beta = loge / sqrt(loge * loge + CUDART_PI_F * CUDART_PI_F);
// effective mass = mass_mesh * mass_sphere / (m_mesh + mass_sphere)
float fam_mass_SU = d_triangleSoup->familyMass_SU[fam];
const float sphere_mass_SU = gran_params->sphere_mass_SU;
float m_eff = sphere_mass_SU * fam_mass_SU / (sphere_mass_SU + fam_mass_SU);
// stiffness and damping coefficient
float kn = (2.0 / 3.0) * Sn;
float gn = 2 * sqrt(5.0 / 6.0) * beta * sqrt(Sn * m_eff);
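                // Summary of the parameters computed above, as implemented here (depth < 0
                // in overlap, e = COR, R = sphere radius):
                //   Sn = 2 * E_eff * sqrt(R * |depth|),  beta = ln(e) / sqrt(ln(e)^2 + pi^2),
                //   kn = (2/3) * Sn,  gn = 2 * sqrt(5/6) * beta * sqrt(Sn * m_eff),
                // so the normal force magnitude below is forceN_mag = -kn * depth + gn * (v_rel . n).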
// relative velocity = v_sphere - v_mesh
float3 v_rel = sphere_vel[sphereIDLocal] - d_triangleSoup->vel[fam];
// assumes pos is the center of mass of the mesh
float3 meshCenter =
make_float3(mesh_params->fam_frame_broad[fam].pos[0], mesh_params->fam_frame_broad[fam].pos[1],
mesh_params->fam_frame_broad[fam].pos[2]);
convert_pos_UU2SU<float3>(meshCenter, gran_params);
// NOTE depth is negative and normal points from triangle to sphere center
float3 r = pt1_float + normal * (depth / 2) - meshCenter;
// Add angular velocity contribution from mesh
v_rel = v_rel - Cross(d_triangleSoup->omega[fam], r);
// add tangential components if they exist
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// Vector from the center of sphere to center of contact volume
float3 r_A = -(gran_params->sphereRadius_SU + depth / 2.f) * normal;
v_rel = v_rel + Cross(omega[sphereIDLocal], r_A);
}
// normal component of relative velocity
float projection = Dot(v_rel, normal);
// tangential component of relative velocity
float3 vrel_t = v_rel - projection * normal;
// normal force magnitude
float forceN_mag = -kn * depth + gn * projection;
float3 force_accum = forceN_mag * normal;
// Compute force updates for adhesion term, opposite the spring term
// NOTE ratio is wrt the weight of a sphere of mass 1
                // NOTE the cancellation of two negatives
force_accum = force_accum + gran_params->sphere_mass_SU * mesh_params->adhesionAcc_s2m * delta / depth;
// tangential component
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// radius pointing from the contact point to the center of particle
float3 Rc = (gran_params->sphereRadius_SU + depth / 2.f) * normal;
float3 roll_ang_acc = computeRollingAngAcc(
sphere_data, gran_params, mesh_params->rolling_coeff_s2m_SU, mesh_params->spinning_coeff_s2m_SU,
force_accum, omega[sphereIDLocal], d_triangleSoup->omega[fam], Rc);
sphere_AngAcc = sphere_AngAcc + roll_ang_acc;
unsigned int BC_histmap_label = triangleFamilyHistmapOffset + fam;
// compute tangent force
float3 tangent_force = computeFrictionForces_matBased(
gran_params, sphere_data, sphereIDGlobal, BC_histmap_label,
mesh_params->static_friction_coeff_s2m, mesh_params->E_eff_s2m_SU, mesh_params->G_eff_s2m_SU,
sqrt_Rd, beta, force_accum, vrel_t, normal, m_eff);
float force_unit = gran_params->MASS_UNIT * gran_params->LENGTH_UNIT /
(gran_params->TIME_UNIT * gran_params->TIME_UNIT);
float velocity_unit = gran_params->LENGTH_UNIT / gran_params->TIME_UNIT;
force_accum = force_accum + tangent_force;
sphere_AngAcc =
sphere_AngAcc + Cross(-1.f * normal, tangent_force) / gran_params->sphereInertia_by_r;
}
// Use the CD information to compute the force and torque on the family of this triangle
sphere_force = sphere_force + force_accum;
// Force on the mesh is opposite the force on the sphere
float3 force_total = -1.f * force_accum;
float3 torque = Cross(fromCenter, force_total);
// TODO we could be much smarter about reducing this atomic write
unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 0, force_total.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 1, force_total.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 2, force_total.z);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 3, torque.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 4, torque.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 5, torque.z);
}
} // end of per-triangle loop
// write back sphere forces
atomicAdd(sphere_data->sphere_acc_X + sphereIDGlobal, sphere_force.x / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Y + sphereIDGlobal, sphere_force.y / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Z + sphereIDGlobal, sphere_force.z / gran_params->sphere_mass_SU);
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// write back torques for later
atomicAdd(sphere_data->sphere_ang_acc_X + sphereIDGlobal, sphere_AngAcc.x);
atomicAdd(sphere_data->sphere_ang_acc_Y + sphereIDGlobal, sphere_AngAcc.y);
atomicAdd(sphere_data->sphere_ang_acc_Z + sphereIDGlobal, sphere_AngAcc.z);
}
} // end sphere id check
} // end kernel
/// <summary>
/// Kernel accounts for the interaction between the granular material and the triangles making up the triangle soup
/// </summary>
/// <param name="d_triangleSoup">- information about triangle soup (in device mem.)</param>
/// <param name="sphere_data">- data structure containing pointers to granular-material related info</param>
/// <param name="SD_trianglesInEachSD_composite">- array saying which triangles touch an SD; has information for each SD</param>
/// <param name="SD_numTrianglesTouching">- number of triangles touching each SD</param>
/// <param name="SD_TrianglesCompositeOffsets">- offsets in the composite array for each SD; where each SD starts storing its triangles</param>
/// <param name="gran_params">- parameters associated with the granular material</param>
/// <param name="mesh_params">- parameters associated with the triangle soup</param>
/// <param name="triangleFamilyHistmapOffset">- offset in the array of friction history (?)</param>
/// <returns></returns>
__global__ void interactionGranMat_TriangleSoup(ChSystemGpuMesh_impl::TriangleSoupPtr d_triangleSoup,
ChSystemGpu_impl::GranSphereDataPtr sphere_data,
const unsigned int* SD_trianglesInEachSD_composite,
const unsigned int* SD_numTrianglesTouching,
const unsigned int* SD_TrianglesCompositeOffsets,
ChSystemGpu_impl::GranParamsPtr gran_params,
ChSystemGpuMesh_impl::MeshParamsPtr mesh_params,
unsigned int triangleFamilyHistmapOffset) {
__shared__ unsigned int triangleIDs[MAX_TRIANGLE_COUNT_PER_SD]; //!< global ID of the triangles touching this SD
__shared__ int3 sphere_pos_local[MAX_COUNT_OF_SPHERES_PER_SD]; //!< local coordinate of the sphere
__shared__ float3 sphere_vel[MAX_COUNT_OF_SPHERES_PER_SD];
// TODO figure out how we can do this better with no friction
__shared__ float3 omega[MAX_COUNT_OF_SPHERES_PER_SD];
__shared__ double3 node1[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 1st node of the triangle
__shared__ double3 node2[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 2nd node of the triangle
__shared__ double3 node3[MAX_TRIANGLE_COUNT_PER_SD]; //!< Coordinates of the 3rd node of the triangle
// define an alias first
unsigned int thisSD = blockIdx.x;
if (SD_numTrianglesTouching[thisSD] == 0) {
return; // no triangle touches this block's SD
}
unsigned int spheresTouchingThisSD = sphere_data->SD_NumSpheresTouching[thisSD];
if (spheresTouchingThisSD == 0) {
return; // no sphere touches this block's SD
}
// Getting here means that there are both triangles and DEs in this SD.
unsigned int numSDTriangles = SD_numTrianglesTouching[thisSD];
unsigned int sphereIDLocal = threadIdx.x;
unsigned int sphereIDGlobal = NULL_CHGPU_ID;
// Bring in data from global into shmem. Only a subset of threads get to do this.
// Note that we're not using shared memory very heavily, so our bandwidth is pretty low
if (sphereIDLocal < spheresTouchingThisSD) {
size_t SD_composite_offset = sphere_data->SD_SphereCompositeOffsets[thisSD];
// TODO standardize this
size_t offset_in_composite_Array = SD_composite_offset + sphereIDLocal;
sphereIDGlobal = sphere_data->spheres_in_SD_composite[offset_in_composite_Array];
sphere_pos_local[sphereIDLocal] =
make_int3(sphere_data->sphere_local_pos_X[sphereIDGlobal], sphere_data->sphere_local_pos_Y[sphereIDGlobal],
sphere_data->sphere_local_pos_Z[sphereIDGlobal]);
unsigned int sphere_owner_SD = sphere_data->sphere_owner_SDs[sphereIDGlobal];
// if this SD doesn't own that sphere, add an offset to account
if (sphere_owner_SD != thisSD) {
sphere_pos_local[sphereIDLocal] =
sphere_pos_local[sphereIDLocal] + getOffsetFromSDs(thisSD, sphere_owner_SD, gran_params);
}
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
omega[sphereIDLocal] =
make_float3(sphere_data->sphere_Omega_X[sphereIDGlobal], sphere_data->sphere_Omega_Y[sphereIDGlobal],
sphere_data->sphere_Omega_Z[sphereIDGlobal]);
}
sphere_vel[sphereIDLocal] =
make_float3(sphere_data->pos_X_dt[sphereIDGlobal], sphere_data->pos_Y_dt[sphereIDGlobal],
sphere_data->pos_Z_dt[sphereIDGlobal]);
}
// Populate the shared memory with mesh triangle data
unsigned int tripsToCoverTriangles = (numSDTriangles + blockDim.x - 1) / blockDim.x;
unsigned int local_ID = threadIdx.x;
for (unsigned int triangTrip = 0; triangTrip < tripsToCoverTriangles; triangTrip++) {
if (local_ID < numSDTriangles) {
size_t SD_composite_offset = SD_TrianglesCompositeOffsets[thisSD];
if (SD_composite_offset == NULL_CHGPU_ID) {
ABORTABORTABORT("Invalid composite offset %lu for SD %u, touching %u triangles\n", NULL_CHGPU_ID,
thisSD, numSDTriangles);
}
size_t offset_in_composite_Array = SD_composite_offset + local_ID;
unsigned int globalID = SD_trianglesInEachSD_composite[offset_in_composite_Array];
triangleIDs[local_ID] = globalID;
// Read node positions from global memory into shared memory
// NOTE implicit cast from float to double here
unsigned int fam = d_triangleSoup->triangleFamily_ID[globalID];
node1[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node1[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node2[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node2[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
node3[local_ID] = apply_frame_transform<double, float3, double3>(
d_triangleSoup->node3[globalID], mesh_params->fam_frame_narrow[fam].pos,
mesh_params->fam_frame_narrow[fam].rot_mat);
convert_pos_UU2SU<double3>(node1[local_ID], gran_params);
convert_pos_UU2SU<double3>(node2[local_ID], gran_params);
convert_pos_UU2SU<double3>(node3[local_ID], gran_params);
}
local_ID += blockDim.x;
}
__syncthreads(); // this call ensures data is in its place in shared memory
float3 sphere_force = {0.f, 0.f, 0.f};
float3 sphere_AngAcc = {0.f, 0.f, 0.f};
if (sphereIDLocal < spheresTouchingThisSD) {
// loop over each triangle in the SD and compute the force this sphere (thread) exerts on it
for (unsigned int triangleLocalID = 0; triangleLocalID < numSDTriangles; triangleLocalID++) {
            /// we have a valid sphere and a valid triangle; check if in contact
float3 normal; // Unit normal from pt2 to pt1 (triangle contact point to sphere contact point)
float depth; // Negative in overlap
float3 pt1_float;
// Transform LRF to GRF
const unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
bool valid_contact = false;
// vector from center of mesh body to contact point, assume this can be held in a float
float3 fromCenter;
{
double3 pt1; // Contact point on triangle
// NOTE sphere_pos_local is relative to THIS SD, not its owner SD
double3 sphCntr =
int64_t3_to_double3(convertPosLocalToGlobal(thisSD, sphere_pos_local[sphereIDLocal], gran_params));
valid_contact = face_sphere_cd(node1[triangleLocalID], node2[triangleLocalID], node3[triangleLocalID],
sphCntr, gran_params->sphereRadius_SU, normal, depth, pt1);
valid_contact = valid_contact &&
SDTripletID(pointSDTriplet(pt1.x, pt1.y, pt1.z, gran_params), gran_params) == thisSD;
pt1_float = make_float3(pt1.x, pt1.y, pt1.z);
double3 meshCenter_double =
make_double3(mesh_params->fam_frame_narrow[fam].pos[0], mesh_params->fam_frame_narrow[fam].pos[1],
mesh_params->fam_frame_narrow[fam].pos[2]);
convert_pos_UU2SU<double3>(meshCenter_double, gran_params);
double3 fromCenter_double = pt1 - meshCenter_double;
fromCenter = make_float3(fromCenter_double.x, fromCenter_double.y, fromCenter_double.z);
}
// If there is a collision, add an impulse to the sphere
if (valid_contact) {
// TODO contact models
// Use the CD information to compute the force on the grElement
float3 delta = -depth * normal;
// effective radius is just sphere radius -- assume meshes are locally flat (a safe assumption?)
float hertz_force_factor = sqrt(abs(depth) / gran_params->sphereRadius_SU);
float3 force_accum = hertz_force_factor * mesh_params->K_n_s2m_SU * delta;
// Compute force updates for adhesion term, opposite the spring term
// NOTE ratio is wrt the weight of a sphere of mass 1
                // NOTE the cancellation of two negatives
force_accum = force_accum + gran_params->sphere_mass_SU * mesh_params->adhesionAcc_s2m * delta / depth;
// Velocity difference, it's better to do a coalesced access here than a fragmented access
// inside
float3 v_rel = sphere_vel[sphereIDLocal] - d_triangleSoup->vel[fam];
// TODO assumes pos is the center of mass of the mesh
// TODO can this be float?
float3 meshCenter =
make_float3(mesh_params->fam_frame_broad[fam].pos[0], mesh_params->fam_frame_broad[fam].pos[1],
mesh_params->fam_frame_broad[fam].pos[2]);
convert_pos_UU2SU<float3>(meshCenter, gran_params);
// NOTE depth is negative and normal points from triangle to sphere center
float3 r = pt1_float + normal * (depth / 2) - meshCenter;
// Add angular velocity contribution from mesh
v_rel = v_rel - Cross(d_triangleSoup->omega[fam], r);
// add tangential components if they exist
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// Vector from the center of sphere to center of contact volume
float3 r_A = -(gran_params->sphereRadius_SU + depth / 2.f) * normal;
v_rel = v_rel + Cross(omega[sphereIDLocal], r_A);
}
// Force accumulator on sphere for this sphere-triangle collision
// Compute force updates for normal spring term
// Compute force updates for damping term
// NOTE assumes sphere mass of 1
float fam_mass_SU = d_triangleSoup->familyMass_SU[fam];
const float sphere_mass_SU = gran_params->sphere_mass_SU;
float m_eff = sphere_mass_SU * fam_mass_SU / (sphere_mass_SU + fam_mass_SU);
float3 vrel_n = Dot(v_rel, normal) * normal;
v_rel = v_rel - vrel_n; // v_rel is now tangential relative velocity
// Add normal damping term
force_accum = force_accum - hertz_force_factor * mesh_params->Gamma_n_s2m_SU * m_eff * vrel_n;
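                // In this user-defined model the normal contact force assembled so far is,
                // in effect, sqrt(|depth|/R) * (K_n * delta - Gamma_n * m_eff * v_n), i.e. a
                // linear spring-dashpot scaled by a Hertzian overlap factor, plus the
                // adhesion term added earlier.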
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// radius pointing from the contact point to the center of particle
float3 Rc = (gran_params->sphereRadius_SU + depth / 2.f) * normal;
float3 roll_ang_acc = computeRollingAngAcc(
sphere_data, gran_params, mesh_params->rolling_coeff_s2m_SU, mesh_params->spinning_coeff_s2m_SU,
force_accum, omega[sphereIDLocal], d_triangleSoup->omega[fam], Rc);
sphere_AngAcc = sphere_AngAcc + roll_ang_acc;
unsigned int BC_histmap_label = triangleFamilyHistmapOffset + fam;
// compute tangent force
float3 tangent_force = computeFrictionForces(
gran_params, sphere_data, sphereIDGlobal, BC_histmap_label,
mesh_params->static_friction_coeff_s2m, mesh_params->K_t_s2m_SU, mesh_params->Gamma_t_s2m_SU,
hertz_force_factor, m_eff, force_accum, v_rel, normal);
float force_unit = gran_params->MASS_UNIT * gran_params->LENGTH_UNIT /
(gran_params->TIME_UNIT * gran_params->TIME_UNIT);
float velocity_unit = gran_params->LENGTH_UNIT / gran_params->TIME_UNIT;
force_accum = force_accum + tangent_force;
sphere_AngAcc =
sphere_AngAcc + Cross(-1.f * normal, tangent_force) / gran_params->sphereInertia_by_r;
}
// Use the CD information to compute the force and torque on the family of this triangle
sphere_force = sphere_force + force_accum;
// Force on the mesh is opposite the force on the sphere
float3 force_total = -1.f * force_accum;
float3 torque = Cross(fromCenter, force_total);
// TODO we could be much smarter about reducing this atomic write
unsigned int fam = d_triangleSoup->triangleFamily_ID[triangleIDs[triangleLocalID]];
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 0, force_total.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 1, force_total.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 2, force_total.z);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 3, torque.x);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 4, torque.y);
atomicAdd(d_triangleSoup->generalizedForcesPerFamily + fam * 6 + 5, torque.z);
}
} // end of per-triangle loop
// write back sphere forces
atomicAdd(sphere_data->sphere_acc_X + sphereIDGlobal, sphere_force.x / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Y + sphereIDGlobal, sphere_force.y / gran_params->sphere_mass_SU);
atomicAdd(sphere_data->sphere_acc_Z + sphereIDGlobal, sphere_force.z / gran_params->sphere_mass_SU);
if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
// write back torques for later
atomicAdd(sphere_data->sphere_ang_acc_X + sphereIDGlobal, sphere_AngAcc.x);
atomicAdd(sphere_data->sphere_ang_acc_Y + sphereIDGlobal, sphere_AngAcc.y);
atomicAdd(sphere_data->sphere_ang_acc_Z + sphereIDGlobal, sphere_AngAcc.z);
}
} // end sphere id check
} // end kernel
__host__ double ChSystemGpuMesh_impl::AdvanceSimulation(float duration) {
    // Figure out the number of blocks that need to be launched to cover the box
unsigned int nBlocks = (nSpheres + CUDA_THREADS_PER_BLOCK - 1) / CUDA_THREADS_PER_BLOCK;
// Settling simulation loop.
float duration_SU = (float)(duration / TIME_SU2UU);
unsigned int nsteps = (unsigned int)std::round(duration_SU / stepSize_SU);
packSphereDataPointers();
// cudaMemAdvise(gran_params, sizeof(*gran_params), cudaMemAdviseSetReadMostly, dev_ID);
METRICS_PRINTF("advancing by %f at timestep %f, %u timesteps at approx user timestep %f\n", duration_SU,
stepSize_SU, nsteps, duration / nsteps);
METRICS_PRINTF("Starting Main Simulation loop!\n");
float time_elapsed_SU = 0.f; // time elapsed in this call (SU)
// Run the simulation, there are aggressive synchronizations because we want to have no race conditions
for (; time_elapsed_SU < stepSize_SU * nsteps; time_elapsed_SU += stepSize_SU) {
updateBCPositions();
runSphereBroadphase();
resetSphereAccelerations();
resetBCForces();
if (meshSoup->nTrianglesInSoup != 0 && mesh_collision_enabled) {
gpuErrchk(
cudaMemset(meshSoup->generalizedForcesPerFamily, 0, 6 * meshSoup->numTriangleFamilies * sizeof(float)));
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
if (meshSoup->nTrianglesInSoup != 0 && mesh_collision_enabled) {
runTriangleBroadphase();
}
METRICS_PRINTF("Starting computeSphereForces!\n");
if (gran_params->friction_mode == CHGPU_FRICTION_MODE::FRICTIONLESS) {
// Compute sphere-sphere forces
if (gran_params->use_mat_based == true) {
METRICS_PRINTF("use material based model\n");
computeSphereForces_frictionless_matBased<<<nSDs, MAX_COUNT_OF_SPHERES_PER_SD>>>(
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size());
} else {
METRICS_PRINTF("use user defined model\n");
computeSphereForces_frictionless<<<nSDs, MAX_COUNT_OF_SPHERES_PER_SD>>>(
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size());
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
// frictional contact
else if (gran_params->friction_mode == CHGPU_FRICTION_MODE::SINGLE_STEP ||
gran_params->friction_mode == CHGPU_FRICTION_MODE::MULTI_STEP) {
// figure out who is contacting
determineContactPairs<<<nSDs, MAX_COUNT_OF_SPHERES_PER_SD>>>(sphere_data, gran_params);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
METRICS_PRINTF("Frictional case.\n");
if (gran_params->use_mat_based == true) {
METRICS_PRINTF("compute sphere-sphere and sphere-bc mat based\n");
computeSphereContactForces_matBased<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size(), nSpheres);
} else {
METRICS_PRINTF("compute sphere-sphere and sphere-bc user defined\n");
computeSphereContactForces<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(
sphere_data, gran_params, BC_type_list.data(), BC_params_list_SU.data(),
(unsigned int)BC_params_list_SU.size(), nSpheres);
}
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
if (meshSoup->numTriangleFamilies != 0 && mesh_collision_enabled) {
// TODO please do not use a template here
// triangle labels come after BC labels numerically
unsigned int triangleFamilyHistmapOffset =
gran_params->nSpheres + 1 + (unsigned int)BC_params_list_SU.size() + 1;
// compute sphere-triangle forces
if (tri_params->use_mat_based == true) {
interactionGranMat_TriangleSoup_matBased<<<nSDs, MAX_COUNT_OF_SPHERES_PER_SD>>>(
meshSoup, sphere_data, SD_trianglesInEachSD_composite.data(), SD_numTrianglesTouching.data(),
SD_TrianglesCompositeOffsets.data(), gran_params, tri_params, triangleFamilyHistmapOffset);
} else {
// // printf("compute sphere-mesh user defined\n");
interactionGranMat_TriangleSoup<<<nSDs, MAX_COUNT_OF_SPHERES_PER_SD>>>(
meshSoup, sphere_data, SD_trianglesInEachSD_composite.data(), SD_numTrianglesTouching.data(),
SD_TrianglesCompositeOffsets.data(), gran_params, tri_params, triangleFamilyHistmapOffset);
}
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
METRICS_PRINTF("Starting integrateSpheres!\n");
integrateSpheres<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(stepSize_SU, sphere_data, nSpheres, gran_params);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
if (gran_params->friction_mode != CHGPU_FRICTION_MODE::FRICTIONLESS) {
const unsigned int nThreadsUpdateHist = 2 * CUDA_THREADS_PER_BLOCK;
unsigned int fricMapSize = nSpheres * MAX_SPHERES_TOUCHED_BY_SPHERE;
unsigned int nBlocksFricHistoryPostProcess = (fricMapSize + nThreadsUpdateHist - 1) / nThreadsUpdateHist;
updateFrictionData<<<nBlocksFricHistoryPostProcess, nThreadsUpdateHist>>>(fricMapSize, sphere_data,
gran_params);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
updateAngVels<<<nBlocks, CUDA_THREADS_PER_BLOCK>>>(stepSize_SU, sphere_data, nSpheres, gran_params);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
elapsedSimTime += (float)(stepSize_SU * TIME_SU2UU); // Advance current time
}
return time_elapsed_SU * TIME_SU2UU; // return elapsed UU time
}
} // namespace gpu
} // namespace chrono
|
3a6d03aee785a44b51ea2dd681c948a49f7620dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
using namespace thrust;
// #define DocNum 3
// #define Classes 2
// #define DocWords 3
// #define DocClass_0 1
// #define DocClass_1 2
#define DocNum 10
#define Classes 2
#define DocWords 20
#define DocClass_0 5
#define DocClass_1 5
__host__ int isin(host_vector<string> vocab, string f) // check whether string f is in vector vocab; returns its index, or -1 if absent
{
// cout << "debug " << f << endl;
if (!vocab.empty())
{
// cout << "vocab not empty" << endl;
for (int i = 0; i < vocab.size(); i++)
{
if (vocab[i].compare(f) == 0)
{
return i;
}
}
}
return -1;
};
__host__ int isinclassify(host_vector<string> &vocab, string Word) {
for (int i = 0; i < vocab.size(); i++) {
if (vocab[i].compare(Word) == 0) {
return i;
}
}
return -1;
}
__host__ void translateDoc( host_vector<string> vocabList,host_vector<string> docs, int* docWord_arr) {
int index = 0;
for (int i = 0; i < docs.size(); i++) {
stringstream ssin(docs[i]);
string word;
while (ssin >> word)
{
docWord_arr[index] = isin(vocabList, word);
index++;
}
}
}
__host__ void getVocab(host_vector<string> &docList, host_vector<string> &vocabList) {
for (int i = 0; i < docList.size(); i++) {
stringstream ssin(docList[i]);
string word;
// printf("%s\n", word);
while (ssin >> word) {
if (isin(vocabList, word) == -1){
vocabList.push_back(word);
}
}
}
// for (int i = 0; i < DocNum; i++)
// {
// stringstream ssin(docList[i]);
// string word;
// printf("%s\n", word);
// while (ssin >> word)
// {
// if (isin(vocabList, word) == -1)
// {
// vocabList.push_back(word);
// }
// }
// }
}
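// term_ClassN: one thread per slot of the term-count array; each thread scans every
// word id of the class's documents and counts how often its own index appears.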
__global__ void term_ClassN(int * doc, int * termInClass, int nDoc) {
int tid = threadIdx.x;
// printf("this is from term_ClassN thread %d\n", tid);
for (int j = 0; j < nDoc*DocWords; j++) {
if (tid == doc[j]) {
// printf("thread id %d and doc word is %d\n",tid, doc[j]);
termInClass[tid] = termInClass[tid] + 1;
}
}
}
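// The kernel below computes an add-one (Laplace) smoothed estimate of each term's
// conditional probability: P(term | class) = (count(term, class) + 1) / (nDocs_in_class + 2).
// One thread per slot of the (over-allocated) term-count array.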
__global__ void find_posterior(int * termInClass, int * nDoc_class, double * posteriorProb) {
int tid = threadIdx.x;
double pos = ((termInClass[tid] + 1) * 1.0) / ((*nDoc_class + 2) * 1.0);
// printf("this is thread %d and pos is %lf add arr index %d\n",tid,pos,tid * (*cur_class));
posteriorProb[tid] = pos;
}
__host__ void translateDocClassify(host_vector<string> &vocabList, string doc, int* docWord_arr) {
string eachword;
string Word[DocWords];
// making a string stream
stringstream ssin(doc);
// Read and print each word.
int i = 0;
while (ssin >> eachword) {
// cout << eachword << endl;
// Word.push_back(eachword);
docWord_arr[i] = isinclassify(vocabList, eachword);
// cout << eachword << endl;
Word[i] = eachword;
i++;
}
}
__global__ void classifyperthread(int *d_in, int *d_out, int* docWord_arr, int sizeofVocab, double *d_posteriorProb_class0,
double *d_posteriorProb_class1) {
int id = threadIdx.x;
bool donthave = true;
for (int i = 0; i < Classes; i++) {
for (int j = 0; j < DocWords; j++) {
if (d_in[j] == id) {
if (i == 0) {
d_posteriorProb_class0[id] = d_posteriorProb_class0[id] * d_posteriorProb_class0[id];
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class0[id]);
donthave = false;
break;
}
else {
                    d_posteriorProb_class1[id] = d_posteriorProb_class1[id] * d_posteriorProb_class1[id];
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class1[id]);
donthave = false;
break;
}
}
}
if (donthave) {
if (i == 0) {
d_posteriorProb_class0[id] = d_posteriorProb_class0[id] * (1-(d_posteriorProb_class0[id]));
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class0[id]);
}
else {
                d_posteriorProb_class1[id] = d_posteriorProb_class1[id] * (1-(d_posteriorProb_class1[id]));
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class1[id]);
}
}
}
}
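// findMax combines the per-word factors into a score for each class,
// score(c) = prior(c) * prod_j posterior_c[j] over the vocabulary, and returns
// the index (0 or 1) of the class with the larger score.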
__host__ int findMax(double *priorProb, double *posteriorProb_class0, double *posteriorProb_class1, int sizeofVocab) {
vector<double> prob;
for (int i = 0; i < Classes; i++) {
for (int j = 0; j <= sizeofVocab; j++) {
if (j == 0) {
if (i == 0) {
prob.push_back(posteriorProb_class0[j]);
// prob.push_back(priorProb[i] * posteriorProb_class0[j]);
}
else {
prob.push_back(posteriorProb_class1[j]);
// prob.push_back(priorProb[i] * posteriorProb_class1[j]);
}
}
else if (j < sizeofVocab) {
if (i == 0) {
prob[i] *= posteriorProb_class0[j];
}
else {
prob[i] *= posteriorProb_class1[j];
}
}
else if (j == sizeofVocab) {
if (i == 0) {
prob[i] = prob[i] * priorProb[i];
}
else {
prob[i] = prob[i] * priorProb[i];
}
}
}
}
cout << prob[0] << " " << prob[1] << endl;
if (prob[0] > prob[1])
return 0;
else
return 1;
}
int main() {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
host_vector<string> c_0;
host_vector<string> c_1;
//Test dataset
// c_0.push_back("animal dog bird");
// c_1.push_back("item box ball");
// c_1.push_back("bottle item bomb");
// string doc = "item bottle bird";
// string doc = "animal dog bird";
c_0.push_back("eligator hosting server we have hosting that can serve you Just paid 20 dollars per month for hosting your web");
c_0.push_back("explore our selection of local favorites with 0 dollars delivery fee for your first month 10 dollars order minimum terms");
c_0.push_back("need graphic design help in just a few clicks you can scale your creative output by hiring our pro designer");
c_0.push_back("so your business is up and running now what grow with a marketing crm that gets smarter as you go");
c_0.push_back("start and grow your business with shopify turn what you love into what you sell try shopify for free today");
// c_0.push_back("looking for new glasses answer a few quick questions and we will suggest some great looking frames for you free");
c_1.push_back("today I feel like I want to sleep all day I just wanna lay in my bed and go sleep");
c_1.push_back("this week is rainy everyday I have to take my umbrella everyday it make me annoy sometimes when I walk");
c_1.push_back("I am so tired I just want to rest in my vacation time go see outside not sit in table");
c_1.push_back("she go to market to buy some pills but when she went out she forgot her wallet at her home");
c_1.push_back("I am so tired now so I want to go to bed because I feel like I am not ok");
// string doc = "create your website for your business just 399 dollars per month you can create your beautiful website for your business";
// string doc = "I think I will go to sleep so do not disturb me I so tired now leave me alone please";
// string doc = " I am so tired now so I want to go to bed because I feel like I am not ok";
// string doc = "I feel like I am so tired I want to sleep everyday when I sleep in my bed feel good";
string doc = "I am so tired now so I want to go to bed because I feel like I am not ok";
// ***class 0 is ads class 1 is not ads***
host_vector<string> vocabList;
double priorProb[Classes];
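    // Class priors with add-one (Laplace) smoothing: P(c) = (N_c + 1) / (N_total + 2),
    // where N_c is the number of training documents in class c.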
priorProb[0] = ((DocClass_0 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
priorProb[1] = ((DocClass_1 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
getVocab(c_0, vocabList);
getVocab(c_1, vocabList);
int class_0_arr[DocClass_0*DocWords];
int class_1_arr[DocClass_1*DocWords];
int termInClass_0[DocNum*DocWords];
int termInClass_1[DocNum*DocWords];
for (int t = 0; t < DocNum*DocWords; t++) { // set value in termInClass to 0 for count in function
termInClass_0[t] = 0;
termInClass_1[t] = 0;
}
translateDoc(vocabList, c_0, class_0_arr);
translateDoc(vocabList, c_1, class_1_arr);
// kernel ---------------------------------------------------
int * d_doc_array, *d_termInClass_0,*d_termInClass_1 ;
// class 0
hipMalloc((void **) &d_doc_array, DocClass_0*DocWords*sizeof(int));
hipMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
hipMemcpy(d_doc_array, &class_0_arr, DocClass_0*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start);
hipLaunchKernelGGL(( term_ClassN), dim3(1),dim3(DocNum*DocWords), 0, 0, d_doc_array, d_termInClass_0,DocClass_0);
hipMemcpy(&termInClass_0, d_termInClass_0, DocNum*DocWords*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_doc_array);
// hipFree(d_termInClass_0);
// ---------------
// class 1
hipMalloc((void **) &d_doc_array, DocClass_1*DocWords*sizeof(int));
hipMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
hipMemcpy(d_doc_array, &class_1_arr, DocClass_1*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( term_ClassN), dim3(1),dim3(DocNum*DocWords), 0, 0, d_doc_array, d_termInClass_1,DocClass_1);
hipMemcpy(&termInClass_1, d_termInClass_1, DocNum*DocWords*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_doc_array);
// hipFree(d_termInClass_1);
int * d_nDoc_class ;
double * d_posteriorProb_class0, *d_posteriorProb_class1;
double posteriorProb_class0[DocWords*DocNum];
double posteriorProb_class1[DocWords*DocNum];
// posteriorProb class 0 ---------------------
int size_of_docClass = DocClass_0;
hipMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
hipMalloc((void **) &d_nDoc_class, sizeof(int));
hipMalloc((void **) &d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double));
hipMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_posteriorProb_class0, &posteriorProb_class0, (DocWords*DocNum)*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( find_posterior), dim3(1),dim3(DocNum*DocWords), 0, 0, d_termInClass_0, d_nDoc_class, d_posteriorProb_class0);
hipMemcpy(&posteriorProb_class0, d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double), hipMemcpyDeviceToHost);
// hipFree(d_termInClass_0);
hipFree(d_nDoc_class);
// hipFree(d_posteriorProb_class0);
// -------------------------------------------
// cout << "----------" << endl;
// class 1 -----------------------------------
size_of_docClass = DocClass_1;
hipMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
hipMalloc((void **) &d_nDoc_class, sizeof(int));
hipMalloc((void **) &d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double));
hipMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_posteriorProb_class1, &posteriorProb_class1, (DocWords*DocNum)*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( find_posterior), dim3(1),dim3(DocNum*DocWords), 0, 0, d_termInClass_1, d_nDoc_class, d_posteriorProb_class1);
hipMemcpy(&posteriorProb_class1, d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double), hipMemcpyDeviceToHost);
// hipFree(d_termInClass_1);
hipFree(d_nDoc_class);
// hipFree(d_posteriorProb_class1);
// --------------------------------------------
// show value of priorProb and posteriorProb
// cout << endl <<"This is priorProb" << endl << endl;
// for (int pp = 0 ; pp < Classes; pp++) {
// cout << priorProb[pp] << endl;
// }
// cout << endl << "this is posteriorProb" << endl << endl;
// cout << "Class 0" << endl << endl;
// for (int p = 0; p < DocWords*DocNum; p++) {
// cout << posteriorProb_class0[p] << endl;
// }
// cout << endl << "Class 1" << endl << endl;
// for (int pd = 0; pd < DocWords*DocNum; pd++) {
// cout << posteriorProb_class1[pd] << endl;
// }
int docWord_arr[DocWords];
//translate
translateDocClassify(vocabList, doc, docWord_arr);
//then get docWord_arr
int *d_in;
int *d_out;
int h_out[1];
hipMalloc((void**) &d_in, DocWords*sizeof(int));
hipMalloc((void**) &d_out, sizeof(int));
hipMemcpy(d_in, &docWord_arr, DocWords*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( classifyperthread), dim3(1), dim3(DocWords*DocNum), 0, 0, d_in, d_out, docWord_arr, vocabList.size(), d_posteriorProb_class0, d_posteriorProb_class1);
hipMemcpy(&posteriorProb_class0, d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(&posteriorProb_class1, d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double), hipMemcpyDeviceToHost);
cout << "Class = " << findMax(priorProb, posteriorProb_class0, posteriorProb_class1, vocabList.size()) << endl;
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(&h_out, d_out, sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_termInClass_0);
    hipFree(d_termInClass_1);
    hipFree(d_posteriorProb_class0);
    hipFree(d_posteriorProb_class1);
hipFree(d_in);
hipFree(d_out);
printf("time used: %f\n", milliseconds);
    return 0;
}
|
3a6d03aee785a44b51ea2dd681c948a49f7620dd.cu
|
#include <cstdio>
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
using namespace thrust;
// #define DocNum 3
// #define Classes 2
// #define DocWords 3
// #define DocClass_0 1
// #define DocClass_1 2
#define DocNum 10
#define Classes 2
#define DocWords 20
#define DocClass_0 5
#define DocClass_1 5
__host__ int isin(host_vector<string> vocab, string f) // check whether string f is in vector vocab; returns its index, or -1 if absent
{
// cout << "debug " << f << endl;
if (!vocab.empty())
{
// cout << "vocab not empty" << endl;
for (int i = 0; i < vocab.size(); i++)
{
if (vocab[i].compare(f) == 0)
{
return i;
}
}
}
return -1;
};
__host__ int isinclassify(host_vector<string> &vocab, string Word) {
for (int i = 0; i < vocab.size(); i++) {
if (vocab[i].compare(Word) == 0) {
return i;
}
}
return -1;
}
__host__ void translateDoc( host_vector<string> vocabList,host_vector<string> docs, int* docWord_arr) {
int index = 0;
for (int i = 0; i < docs.size(); i++) {
stringstream ssin(docs[i]);
string word;
while (ssin >> word)
{
docWord_arr[index] = isin(vocabList, word);
index++;
}
}
}
__host__ void getVocab(host_vector<string> &docList, host_vector<string> &vocabList) {
for (int i = 0; i < docList.size(); i++) {
stringstream ssin(docList[i]);
string word;
// printf("%s\n", word);
while (ssin >> word) {
if (isin(vocabList, word) == -1){
vocabList.push_back(word);
}
}
}
// for (int i = 0; i < DocNum; i++)
// {
// stringstream ssin(docList[i]);
// string word;
// printf("%s\n", word);
// while (ssin >> word)
// {
// if (isin(vocabList, word) == -1)
// {
// vocabList.push_back(word);
// }
// }
// }
}
__global__ void term_ClassN(int * doc, int * termInClass, int nDoc) {
int tid = threadIdx.x;
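// term_ClassN: thread tid counts how many times vocabulary index tid occurs across the
// nDoc documents of this class (doc[] holds one vocabulary index per word position).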
// printf("this is from term_ClassN thread %d\n", tid);
for (int j = 0; j < nDoc*DocWords; j++) {
if (tid == doc[j]) {
// printf("thread id %d and doc word is %d\n",tid, doc[j]);
termInClass[tid] = termInClass[tid] + 1;
}
}
}
__global__ void find_posterior(int * termInClass, int * nDoc_class, double * posteriorProb) {
int tid = threadIdx.x;
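// Add-one (Laplace) smoothing: P(term | class) = (count of term in class + 1) / (docs in class + 2)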
double pos = ((termInClass[tid] + 1) * 1.0) / ((*nDoc_class + 2) * 1.0);
// printf("this is thread %d and pos is %lf add arr index %d\n",tid,pos,tid * (*cur_class));
posteriorProb[tid] = pos;
}
__host__ void translateDocClassify(host_vector<string> &vocabList, string doc, int* docWord_arr) {
string eachword;
string Word[DocWords];
// making a string stream
stringstream ssin(doc);
// Read and print each word.
int i = 0;
while (ssin >> eachword) {
// cout << eachword << endl;
// Word.push_back(eachword);
docWord_arr[i] = isinclassify(vocabList, eachword);
// cout << eachword << endl;
Word[i] = eachword;
i++;
}
}
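// classifyperthread: one thread per posterior-table entry; the thread id is compared against the
// vocabulary indices of the test document in d_in. If the word occurs in the document, the stored
// per-term posterior is squared, otherwise it is multiplied by (1 - posterior). Note that the
// class-1 branches below read d_posteriorProb_class0 when updating d_posteriorProb_class1.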
__global__ void classifyperthread(int *d_in, int *d_out, int* docWord_arr, int sizeofVocab, double *d_posteriorProb_class0,
double *d_posteriorProb_class1) {
int id = threadIdx.x;
bool donthave = true;
for (int i = 0; i < Classes; i++) {
for (int j = 0; j < DocWords; j++) {
if (d_in[j] == id) {
if (i == 0) {
d_posteriorProb_class0[id] = d_posteriorProb_class0[id] * d_posteriorProb_class0[id];
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class0[id]);
donthave = false;
break;
}
else {
d_posteriorProb_class1[id] = d_posteriorProb_class0[id] * d_posteriorProb_class0[id];
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class1[id]);
donthave = false;
break;
}
}
}
if (donthave) {
if (i == 0) {
d_posteriorProb_class0[id] = d_posteriorProb_class0[id] * (1-(d_posteriorProb_class0[id]));
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class0[id]);
}
else {
d_posteriorProb_class1[id] = d_posteriorProb_class0[id] * (1-(d_posteriorProb_class0[id]));
// printf("class %d posteriorProb = %f\n", i, d_posteriorProb_class1[id]);
}
}
}
}
__host__ int findMax(double *priorProb, double *posteriorProb_class0, double *posteriorProb_class1, int sizeofVocab) {
vector<double> prob;
for (int i = 0; i < Classes; i++) {
for (int j = 0; j <= sizeofVocab; j++) {
if (j == 0) {
if (i == 0) {
prob.push_back(posteriorProb_class0[j]);
// prob.push_back(priorProb[i] * posteriorProb_class0[j]);
}
else {
prob.push_back(posteriorProb_class1[j]);
// prob.push_back(priorProb[i] * posteriorProb_class1[j]);
}
}
else if (j < sizeofVocab) {
if (i == 0) {
prob[i] *= posteriorProb_class0[j];
}
else {
prob[i] *= posteriorProb_class1[j];
}
}
else if (j == sizeofVocab) {
if (i == 0) {
prob[i] = prob[i] * priorProb[i];
}
else {
prob[i] = prob[i] * priorProb[i];
}
}
}
}
cout << prob[0] << " " << prob[1] << endl;
if (prob[0] > prob[1])
return 0;
else
return 1;
}
int main() {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
host_vector<string> c_0;
host_vector<string> c_1;
//Test dataset
// c_0.push_back("animal dog bird");
// c_1.push_back("item box ball");
// c_1.push_back("bottle item bomb");
// string doc = "item bottle bird";
// string doc = "animal dog bird";
c_0.push_back("eligator hosting server we have hosting that can serve you Just paid 20 dollars per month for hosting your web");
c_0.push_back("explore our selection of local favorites with 0 dollars delivery fee for your first month 10 dollars order minimum terms");
c_0.push_back("need graphic design help in just a few clicks you can scale your creative output by hiring our pro designer");
c_0.push_back("so your business is up and running now what grow with a marketing crm that gets smarter as you go");
c_0.push_back("start and grow your business with shopify turn what you love into what you sell try shopify for free today");
// c_0.push_back("looking for new glasses answer a few quick questions and we will suggest some great looking frames for you free");
c_1.push_back("today I feel like I want to sleep all day I just wanna lay in my bed and go sleep");
c_1.push_back("this week is rainy everyday I have to take my umbrella everyday it make me annoy sometimes when I walk");
c_1.push_back("I am so tired I just want to rest in my vacation time go see outside not sit in table");
c_1.push_back("she go to market to buy some pills but when she went out she forgot her wallet at her home");
c_1.push_back("I am so tired now so I want to go to bed because I feel like I am not ok");
// string doc = "create your website for your business just 399 dollars per month you can create your beautiful website for your business";
// string doc = "I think I will go to sleep so do not disturb me I so tired now leave me alone please";
// string doc = " I am so tired now so I want to go to bed because I feel like I am not ok";
// string doc = "I feel like I am so tired I want to sleep everyday when I sleep in my bed feel good";
string doc = "I am so tired now so I want to go to bed because I feel like I am not ok";
// ***class 0 is ads class 1 is not ads***
host_vector<string> vocabList;
double priorProb[Classes];
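// Laplace-smoothed class priors: (documents in class + 1) / (total documents + number of classes)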
priorProb[0] = ((DocClass_0 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
priorProb[1] = ((DocClass_1 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
getVocab(c_0, vocabList);
getVocab(c_1, vocabList);
int class_0_arr[DocClass_0*DocWords];
int class_1_arr[DocClass_1*DocWords];
int termInClass_0[DocNum*DocWords];
int termInClass_1[DocNum*DocWords];
for (int t = 0; t < DocNum*DocWords; t++) { // set value in termInClass to 0 for count in function
termInClass_0[t] = 0;
termInClass_1[t] = 0;
}
translateDoc(vocabList, c_0, class_0_arr);
translateDoc(vocabList, c_1, class_1_arr);
// kernel ---------------------------------------------------
int * d_doc_array, *d_termInClass_0,*d_termInClass_1 ;
// class 0
cudaMalloc((void **) &d_doc_array, DocClass_0*DocWords*sizeof(int));
cudaMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
cudaMemcpy(d_doc_array, &class_0_arr, DocClass_0*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
term_ClassN<<<1,DocNum*DocWords>>>(d_doc_array, d_termInClass_0,DocClass_0);
cudaMemcpy(&termInClass_0, d_termInClass_0, DocNum*DocWords*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_doc_array);
// cudaFree(d_termInClass_0);
// ---------------
// class 1
cudaMalloc((void **) &d_doc_array, DocClass_1*DocWords*sizeof(int));
cudaMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
cudaMemcpy(d_doc_array, &class_1_arr, DocClass_1*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
term_ClassN<<<1,DocNum*DocWords>>>(d_doc_array, d_termInClass_1,DocClass_1);
cudaMemcpy(&termInClass_1, d_termInClass_1, DocNum*DocWords*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_doc_array);
// cudaFree(d_termInClass_1);
int * d_nDoc_class ;
double * d_posteriorProb_class0, *d_posteriorProb_class1;
double posteriorProb_class0[DocWords*DocNum];
double posteriorProb_class1[DocWords*DocNum];
// posteriorProb class 0 ---------------------
int size_of_docClass = DocClass_0;
cudaMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
cudaMalloc((void **) &d_nDoc_class, sizeof(int));
cudaMalloc((void **) &d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double));
cudaMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_posteriorProb_class0, &posteriorProb_class0, (DocWords*DocNum)*sizeof(double), cudaMemcpyHostToDevice);
find_posterior<<<1,DocNum*DocWords>>>(d_termInClass_0, d_nDoc_class, d_posteriorProb_class0);
cudaMemcpy(&posteriorProb_class0, d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double), cudaMemcpyDeviceToHost);
// cudaFree(d_termInClass_0);
cudaFree(d_nDoc_class);
// cudaFree(d_posteriorProb_class0);
// -------------------------------------------
// cout << "----------" << endl;
// class 1 -----------------------------------
size_of_docClass = DocClass_1;
cudaMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
cudaMalloc((void **) &d_nDoc_class, sizeof(int));
cudaMalloc((void **) &d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double));
cudaMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_posteriorProb_class1, &posteriorProb_class1, (DocWords*DocNum)*sizeof(double), cudaMemcpyHostToDevice);
find_posterior<<<1,DocNum*DocWords>>>(d_termInClass_1, d_nDoc_class, d_posteriorProb_class1);
cudaMemcpy(&posteriorProb_class1, d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double), cudaMemcpyDeviceToHost);
// cudaFree(d_termInClass_1);
cudaFree(d_nDoc_class);
// cudaFree(d_posteriorProb_class1);
// --------------------------------------------
// show value of priorProb and posteriorProb
// cout << endl <<"This is priorProb" << endl << endl;
// for (int pp = 0 ; pp < Classes; pp++) {
// cout << priorProb[pp] << endl;
// }
// cout << endl << "this is posteriorProb" << endl << endl;
// cout << "Class 0" << endl << endl;
// for (int p = 0; p < DocWords*DocNum; p++) {
// cout << posteriorProb_class0[p] << endl;
// }
// cout << endl << "Class 1" << endl << endl;
// for (int pd = 0; pd < DocWords*DocNum; pd++) {
// cout << posteriorProb_class1[pd] << endl;
// }
int docWord_arr[DocWords];
//translate
translateDocClassify(vocabList, doc, docWord_arr);
//then get docWord_arr
int *d_in;
int *d_out;
int h_out[1];
cudaMalloc((void**) &d_in, DocWords*sizeof(int));
cudaMalloc((void**) &d_out, sizeof(int));
cudaMemcpy(d_in, &docWord_arr, DocWords*sizeof(int), cudaMemcpyHostToDevice);
classifyperthread<<<1, DocWords*DocNum>>>(d_in, d_out, docWord_arr, vocabList.size(), d_posteriorProb_class0, d_posteriorProb_class1);
cudaMemcpy(&posteriorProb_class0, d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&posteriorProb_class1, d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double), cudaMemcpyDeviceToHost);
cout << "Class = " << findMax(priorProb, posteriorProb_class0, posteriorProb_class1, vocabList.size()) << endl;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_termInClass_0);
cudaFree(d_termInClass_1);
cudaFree(d_posteriorProb_class0);
cudaFree(d_posteriorProb_class1);
cudaFree(d_in);
cudaFree(d_out);
printf("time used: %f\n", milliseconds);
return -1;
}
|
4231f4f474fcdf1c72f7b716fb8785073d0463c2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
int nt; /* Number of Threads */
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
/* ------------------ Cuda Code --------------------- */
/****** Parallel Cuda code *******/
/* Defined global variables are
* maximum matrix size = MAXN
* Given matrix size = N
* Input matrix representation in 1D A[N][N]
* Output matrix in 1D is B[N][N]
*/
__global__ void meanCalculation(float* d_in, float* d_mean, int N, int nt)
{
extern __shared__ float col_data[];
__shared__ float col_total;
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int thread_id = threadIdx.y;
unsigned int j = idx_y * N + idx_x;
__syncthreads();
/*Calculation for each thread id*/
col_data[thread_id]=d_in[j];
/*below for loop is for if number of thread < Matrix size.*/
for(int i=0;i<N;i+=nt){
if(N*(nt+thread_id+i)+blockIdx.x < N*N){
col_data[thread_id]+=d_in[(N*(nt+thread_id+i))+blockIdx.x];
}
}
/* Sum reduction performed on each column data which is corresponding to
* one block by zeroth thread of each block
*/
if(thread_id==0){
col_total=0;
for(int s=0;s<nt;s++){
col_total+=col_data[thread_id+s];
}
d_mean[blockIdx.x]=col_total/N;
}
}
__global__ void calculate_SD(float* d_in, float* d_mean, float* d_sd, int N, int nt)
{
extern __shared__ float col_sd_data[];
__shared__ float col_sd_total;
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int thread_id = threadIdx.y;
unsigned int j = idx_y * N + idx_x;
__syncthreads();
col_sd_data[thread_id] = powf(d_in[j] - d_mean[blockIdx.x], 2.0);
for(int i=0;i<N;i+=nt){
if(N*(nt+thread_id+i)+blockIdx.x < N*N){
col_sd_data[thread_id]+=powf(d_in[(N*(nt+thread_id+i))+blockIdx.x] - d_mean[blockIdx.x], 2.0);
}
}
if(thread_id==0){
col_sd_total=0;
for(int s=0;s<nt;s++){
col_sd_total+=col_sd_data[thread_id+s];
}
d_sd[blockIdx.x] = col_sd_total/(float) N;
}
}
__global__ void matrixColumnNorm(float* d_in, float* d_out, float* d_mean, float* d_sd, int N, int nt,int c)
{
unsigned int thread_id = threadIdx.y;
d_out[thread_id+blockIdx.x*N] = (d_in[thread_id+blockIdx.x*N] - d_mean[blockIdx.x]) / d_sd[blockIdx.x];
for(int i=0;i<c;i++){
if((nt+thread_id)+blockIdx.x*N < N*N){
d_out[(nt+thread_id)+blockIdx.x*N] = (d_in[(nt+thread_id)+blockIdx.x*N] - d_mean[blockIdx.x])/d_sd[blockIdx.x];
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
nt = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
if (nt > 1024) {
printf("nt = %i is out of range.Please provide number of thread less than 1024.\n", N);
exit(0);
}
}
else {
printf("Usage: %s <matrix_dimension> <number_of_thread> [random seed]\n",
argv[0]);
exit(0);
}
/* Print parameters */
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv) {
/* Timing variables */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2;
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* A = new float [N * N];
float* B = new float [N * N];
int i,j;
/*initializing input A*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
A[j* N + i] = (float)rand()/ 64000.00;
}
}
/*print inputs.*/
if (N < 10) {
printf("\nA =\n\t");
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%5.2f%s", A[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}
float* d_in;
float* d_out;
float* d_mean;
float* d_sd;
size_t sizeof2d = N * N * sizeof(float);
size_t sizeof1d = N * sizeof(float);
//allocate the device memory for the source array
hipMalloc(&d_in, sizeof2d);
hipMemcpy(d_in, A, sizeof2d, hipMemcpyHostToDevice);
//allocate the device memory for destination array
hipMalloc(&d_out, sizeof2d);
//allocate the device memory for the mean array
hipMalloc(&d_mean, sizeof1d);
//allocate the device memory for sd array
hipMalloc(&d_sd, sizeof1d);
dim3 dimBlock;
dim3 dimGrid;
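// Launch geometry: one block per matrix column (gridDim.x = N); each block runs
// min(N, nt) threads along y, so a thread may cover several rows of its column.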
if( N < nt)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = nt;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
hipEventRecord(start);
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
double c1=(double)N/(double)nt;
int c=ceil(c1);
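// Kernel pipeline: per-column mean, then per-column spread (calculate_SD stores the variance
// in d_sd without taking a square root), then per-element normalization (x - mean) / d_sd;
// c = ceil(N / nt) is the per-thread chunk count used by matrixColumnNorm.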
hipLaunchKernelGGL(( meanCalculation), dim3(dimGrid), dim3(dimBlock), sizeof1d, 0, d_in, d_mean, N,nt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculate_SD), dim3(dimGrid), dim3(dimBlock), sizeof1d, 0, d_in, d_mean, d_sd, N,nt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( matrixColumnNorm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_in, d_out, d_mean, d_sd, N,nt,c);
hipDeviceSynchronize();
/* Stop Clock */
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
hipMemcpy(B, d_out, N * N * sizeof(float), hipMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
if (N < 10) {
printf("\nB =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", B[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}
/* Display timing results */
printf("\nElapsed CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed Cuda Time = %g ms \n",milliseconds);
printf("Effective Bandwidth (GB/s): %f \n", (2*sizeof2d/milliseconds)/1e6);
float mean_work = N * log2((float)N) + N;
float sd_work = N * log2((float)N) + (2*N) + (2*N*N);
float norm_work = 2 * N * N;
printf("Effective Throughput (GFLOPS/s): %f \n", ((mean_work+sd_work+norm_work)*1e-9)/(milliseconds*1e-3));
printf("--------------------------------------------\n");
//deallocate device memory
hipFree(d_in);
hipFree(d_out);
hipFree(d_mean);
hipFree(d_sd);
delete [] A;
delete [] B;
exit(0);
}
|
4231f4f474fcdf1c72f7b716fb8785073d0463c2.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
int nt; /* Number of Threads */
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
/* ------------------ Cuda Code --------------------- */
/****** Parallel Cuda code *******/
/* Defined global variables are
* maximum matrix size = MAXN
* Given matrix size = N
* Input matrix representation in 1D A[N][N]
* Output matrix in 1D is B[N][N]
*/
__global__ void meanCalculation(float* d_in, float* d_mean, int N, int nt)
{
extern __shared__ float col_data[];
__shared__ float col_total;
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int thread_id = threadIdx.y;
unsigned int j = idx_y * N + idx_x;
__syncthreads();
/*Calculation for each thread id*/
col_data[thread_id]=d_in[j];
/*below for loop is for if number of thread < Matrix size.*/
for(int i=0;i<N;i+=nt){
if(N*(nt+thread_id+i)+blockIdx.x < N*N){
col_data[thread_id]+=d_in[(N*(nt+thread_id+i))+blockIdx.x];
}
}
/* Sum reduction performed on each column data which is corresponding to
* one block by zeroth thread of each block
*/
if(thread_id==0){
col_total=0;
for(int s=0;s<nt;s++){
col_total+=col_data[thread_id+s];
}
d_mean[blockIdx.x]=col_total/N;
}
}
__global__ void calculate_SD(float* d_in, float* d_mean, float* d_sd, int N, int nt)
{
extern __shared__ float col_sd_data[];
__shared__ float col_sd_total;
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int thread_id = threadIdx.y;
unsigned int j = idx_y * N + idx_x;
__syncthreads();
col_sd_data[thread_id] = powf(d_in[j] - d_mean[blockIdx.x], 2.0);
for(int i=0;i<N;i+=nt){
if(N*(nt+thread_id+i)+blockIdx.x < N*N){
col_sd_data[thread_id]+=powf(d_in[(N*(nt+thread_id+i))+blockIdx.x] - d_mean[blockIdx.x], 2.0);
}
}
if(thread_id==0){
col_sd_total=0;
for(int s=0;s<nt;s++){
col_sd_total+=col_sd_data[thread_id+s];
}
d_sd[blockIdx.x] = col_sd_total/(float) N;
}
}
__global__ void matrixColumnNorm(float* d_in, float* d_out, float* d_mean, float* d_sd, int N, int nt,int c)
{
unsigned int thread_id = threadIdx.y;
d_out[thread_id+blockIdx.x*N] = (d_in[thread_id+blockIdx.x*N] - d_mean[blockIdx.x]) / d_sd[blockIdx.x];
for(int i=0;i<c;i++){
if((nt+thread_id)+blockIdx.x*N < N*N){
d_out[(nt+thread_id)+blockIdx.x*N] = (d_in[(nt+thread_id)+blockIdx.x*N] - d_mean[blockIdx.x])/d_sd[blockIdx.x];
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
nt = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
if (nt > 1024) {
printf("nt = %i is out of range.Please provide number of thread less than 1024.\n", N);
exit(0);
}
}
else {
printf("Usage: %s <matrix_dimension> <number_of_thread> [random seed]\n",
argv[0]);
exit(0);
}
/* Print parameters */
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv) {
/* Timing variables */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2;
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* A = new float [N * N];
float* B = new float [N * N];
int i,j;
/*initializing input A*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
A[j* N + i] = (float)rand()/ 64000.00;
}
}
/*print inputs.*/
if (N < 10) {
printf("\nA =\n\t");
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%5.2f%s", A[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}
float* d_in;
float* d_out;
float* d_mean;
float* d_sd;
size_t sizeof2d = N * N * sizeof(float);
size_t sizeof1d = N * sizeof(float);
//allocate the device memory for the source array
cudaMalloc(&d_in, sizeof2d);
cudaMemcpy(d_in, A, sizeof2d, cudaMemcpyHostToDevice);
//allocate the device memory for destination array
cudaMalloc(&d_out, sizeof2d);
//allocate the device memory for the mean array
cudaMalloc(&d_mean, sizeof1d);
//allocate the device memory for sd array
cudaMalloc(&d_sd, sizeof1d);
dim3 dimBlock;
dim3 dimGrid;
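// Launch geometry: one block per matrix column (gridDim.x = N); each block runs
// min(N, nt) threads along y, so a thread may cover several rows of its column.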
if( N < nt)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = nt;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
cudaEventRecord(start);
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
double c1=(double)N/(double)nt;
int c=ceil(c1);
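// Kernel pipeline: per-column mean, then per-column spread (calculate_SD stores the variance
// in d_sd without taking a square root), then per-element normalization (x - mean) / d_sd;
// c = ceil(N / nt) is the per-thread chunk count used by matrixColumnNorm.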
meanCalculation<<<dimGrid, dimBlock, sizeof1d>>>(d_in, d_mean, N,nt);
cudaDeviceSynchronize();
calculate_SD<<<dimGrid, dimBlock, sizeof1d>>>(d_in, d_mean, d_sd, N,nt);
cudaDeviceSynchronize();
matrixColumnNorm<<<dimGrid, dimBlock>>>(d_in, d_out, d_mean, d_sd, N,nt,c);
cudaDeviceSynchronize();
/* Stop Clock */
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
cudaMemcpy(B, d_out, N * N * sizeof(float), cudaMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
if (N < 10) {
printf("\nB =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", B[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}
/* Display timing results */
printf("\nElapsed CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed Cuda Time = %g ms \n",milliseconds);
printf("Effective Bandwidth (GB/s): %f \n", (2*sizeof2d/milliseconds)/1e6);
float mean_work = N * log2((float)N) + N;
float sd_work = N * log2((float)N) + (2*N) + (2*N*N);
float norm_work = 2 * N * N;
printf("Effective Throughput (GFLOPS/s): %f \n", ((mean_work+sd_work+norm_work)*1e-9)/(milliseconds*1e-3));
printf("--------------------------------------------\n");
//deallocate device memory
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_mean);
cudaFree(d_sd);
delete [] A;
delete [] B;
exit(0);
}
|
4cbfd4c4ba79fc6ad5d6a6583de0025197d1a3e3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <bits/stdc++.h>
#include "wb.h"
int main(int argc, char **argv) {
wbArg_t args;
float *hostInput, *hostOutput; // The input 1D list
float *devInput, *devOutput; // device copies of the input and output lists
int num_elements; // number of elements in the input list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &num_elements);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
num_elements);
// Declare and allocate the host output array
//@@ Insert code here
hostOutput = (float *) malloc(num_elements*sizeof(float));
// Declare and allocate thrust device input and output vectors
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Insert code here
hipMalloc((void**)&devInput,sizeof(float)*num_elements);
hipMalloc((void**)&devOutput,sizeof(float)*num_elements);
// thrust::device_vector<float> dev_in(num_elements);
// thrust::device_vector<float> dev_out(num_elements);
hipMemcpy(devInput,hostInput,sizeof(float)*num_elements,hipMemcpyHostToDevice);
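// Wrap the raw device pointers so thrust::inclusive_scan can use them as device iterators.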
thrust::device_ptr<float> dev_in(devInput);
thrust::device_ptr<float> dev_out(devOutput);
wbTime_stop(GPU, "Allocating GPU memory.");
// Execute vector addition
wbTime_start(Compute,"Doing the computation on the GPU");
//@@ Insert Code here
thrust::inclusive_scan(dev_in, dev_in+num_elements, dev_out);
wbTime_stop(Compute, "Doing the computation on the GPU");
hipMemcpy(hostOutput,devOutput,sizeof(float)*num_elements,hipMemcpyDeviceToHost);
wbSolution(args, hostOutput, num_elements);
// Free Host Memory
free(hostInput);
//@@ Insert code here
return 0;
}
|
4cbfd4c4ba79fc6ad5d6a6583de0025197d1a3e3.cu
|
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <bits/stdc++.h>
#include "wb.h"
int main(int argc, char **argv) {
wbArg_t args;
float *hostInput, *hostOutput; // The input 1D list
float *devInput, *devOutput; // device copies of the input and output lists
int num_elements; // number of elements in the input list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &num_elements);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
num_elements);
// Declare and allocate the host output array
//@@ Insert code here
hostOutput = (float *) malloc(num_elements*sizeof(float));
// Declare and allocate thrust device input and output vectors
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Insert code here
cudaMalloc((void**)&devInput,sizeof(float)*num_elements);
cudaMalloc((void**)&devOutput,sizeof(float)*num_elements);
// thrust::device_vector<float> dev_in(num_elements);
// thrust::device_vector<float> dev_out(num_elements);
cudaMemcpy(devInput,hostInput,sizeof(float)*num_elements,cudaMemcpyHostToDevice);
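// Wrap the raw device pointers so thrust::inclusive_scan can use them as device iterators.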
thrust::device_ptr<float> dev_in(devInput);
thrust::device_ptr<float> dev_out(devOutput);
wbTime_stop(GPU, "Allocating GPU memory.");
// Execute vector addition
wbTime_start(Compute,"Doing the computation on the GPU");
//@@ Insert Code here
thrust::inclusive_scan(dev_in, dev_in+num_elements, dev_out);
wbTime_stop(Compute, "Doing the computation on the GPU");
cudaMemcpy(hostOutput,devOutput,sizeof(float)*num_elements,cudaMemcpyDeviceToHost);
wbSolution(args, hostOutput, num_elements);
// Free Host Memory
free(hostInput);
//@@ Insert code here
return 0;
}
|
86ba247a417bbf56bc803c6fdb616f3c470f6db1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* daxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void daxpy(
double alpha,
const double* __restrict__ b,
double* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_dblas3
********************************************************************/
__global__ void
dgemm_kernel_N_N_64_16_16_16_4(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
__shared__ double Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory accesses
in dimension N.
Simply put, they are the stopping criterion; equivalently, the access index wraps
around to a valid memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const double *Bend = B + k - k % 16;
double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[12][0], Cb );
daxpy( Ab[1], &Bb[13][0], Cb );
daxpy( Ab[2], &Bb[14][0], Cb );
daxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by setting
s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
daxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimensions M, N that don't fit into whole blocks
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_N_N_64_16_16_16_4(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
hipLaunchKernelGGL(( dgemm_kernel_N_N_64_16_16_16_4), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
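// Usage sketch (illustrative assumption, not part of MAGMA): given column-major device arrays
// dA (m x k, leading dimension lda), dB (k x n, ldb) and dC (m x n, ldc), a call such as
//   magmablas_dgemm_N_N_64_16_16_16_4( dC, dA, dB, m, n, k, lda, ldb, ldc, alpha, beta );
// runs the kernel on magma_stream; the grid is derived from m and n inside the wrapper.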
|
86ba247a417bbf56bc803c6fdb616f3c470f6db1.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* daxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void daxpy(
double alpha,
const double* __restrict__ b,
double* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_dblas3
********************************************************************/
__global__ void
dgemm_kernel_N_N_64_16_16_16_4(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
__shared__ double Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory accesses
in dimension N.
Simply put, they are the stopping criterion; equivalently, the access index wraps
around to a valid memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const double *Bend = B + k - k % 16;
double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[12][0], Cb );
daxpy( Ab[1], &Bb[13][0], Cb );
daxpy( Ab[2], &Bb[14][0], Cb );
daxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by setting
s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
daxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimensions M, N that don't fit into whole blocks
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_N_N_64_16_16_16_4(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
dgemm_kernel_N_N_64_16_16_16_4<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
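// Usage sketch (illustrative assumption, not part of MAGMA): given column-major device arrays
// dA (m x k, leading dimension lda), dB (k x n, ldb) and dC (m x n, ldc), a call such as
//   magmablas_dgemm_N_N_64_16_16_16_4( dC, dA, dB, m, n, k, lda, ldb, ldc, alpha, beta );
// runs the kernel on magma_stream; the grid is derived from m and n inside the wrapper.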
|
89dd211497235a42a21268441e30e05c0263400d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include <time.h>
#define TRUE 1
#define FALSE 0
#define MIN(a,b) (a < b?a:b )
static const int N = 150;
__global__ void cerca_array_device(int *array,int *valor,int *res)
{
int b;
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(*res == FALSE && *valor == array[id]){
*res = TRUE;
for(int i = 0; i <= 1000000; i++) // demonstrate that if more operations are done in the search, the threads are ...
b = (b*70)/3;
}
}
__host__ bool cerca_array_host(int *array, int valor)
{
int b;
for(int i = 0 ; i < N ; ++i){
if(array[i] == valor){
return true;
}
for(int i = 0; i <= 1000000; i++)
b = (b*70)/3;
}
return false;
}
int main()
{
srand(time(NULL));
int a[N],valor;
for(int i=0;i<N;i++)
a[i] = (int)rand()/(int)(RAND_MAX/300.0);
for(int i=0;i<N;i++)
printf("valor: %d \t", a[i]);
printf("\nNombre a cercar: ");
scanf("%d",&valor);
//Execution on the CPU
int res;
clock_t t_host = clock();
res = cerca_array_host(a,valor);
t_host = clock() - t_host;
double time_taken_host = ((double)t_host)/CLOCKS_PER_SEC;
printf("CPU: %f segons \n", time_taken_host);
if(res == TRUE)
printf("host: We found the number\n");
else
printf("host: We don't found the number :(\n");
int *dev_array , *dev_value , *dev_res;
hipMalloc((void**)&dev_array, N*sizeof(int) );
hipMalloc((void**)&dev_value, sizeof(int) );
hipMalloc((void**)&dev_res, sizeof(int) );
res = FALSE;
hipMemcpy(dev_array, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_value, &valor, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_res, &res, sizeof(int), hipMemcpyHostToDevice);
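// Shrink the block size until it divides N exactly, so blocks * threads_block == N and
// every thread id maps to a valid array element (the kernel has no bounds check).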
int threads_block = MIN(512,N);
while(N%threads_block != 0)--threads_block;
int blocks = N / threads_block;
clock_t t_device = clock();
hipLaunchKernelGGL(( cerca_array_device), dim3(blocks),dim3(threads_block), 0, 0, dev_array,dev_value,dev_res);
hipMemcpy(&res, dev_res, sizeof(int), hipMemcpyDeviceToHost);//Copy memory from device to host
t_device = clock() - t_device;
double time_taken_device = ((double)t_device)/CLOCKS_PER_SEC;
printf("GPU %f segons \n", time_taken_device);
hipFree(dev_array);//free device mem
hipFree(dev_value);
hipFree(dev_res);
//Print whether the number was found
if(res == TRUE)
printf("device: We found the number\n");
else
printf("device: We don't found the number :(\n");
return 0;
}
|
89dd211497235a42a21268441e30e05c0263400d.cu
|
#include<stdio.h>
#include <time.h>
#define TRUE 1
#define FALSE 0
#define MIN(a,b) (a < b?a:b )
static const int N = 150;
__global__ void cerca_array_device(int *array,int *valor,int *res)
{
int b;
int id = threadIdx.x + blockIdx.x * blockDim.x;
if(*res == FALSE && *valor == array[id]){
*res = TRUE;
for(int i = 0; i <= 1000000; i++) // demonstrate that if more operations are done in the search, the threads are ...
b = (b*70)/3;
}
}
__host__ bool cerca_array_host(int *array, int valor)
{
int b;
for(int i = 0 ; i < N ; ++i){
if(array[i] == valor){
return true;
}
for(int i = 0; i <= 1000000; i++)
b = (b*70)/3;
}
return false;
}
int main()
{
srand(time(NULL));
int a[N],valor;
for(int i=0;i<N;i++)
a[i] = (int)rand()/(int)(RAND_MAX/300.0);
for(int i=0;i<N;i++)
printf("valor: %d \t", a[i]);
printf("\nNombre a cercar: ");
scanf("%d",&valor);
//Execution on the CPU
int res;
clock_t t_host = clock();
res = cerca_array_host(a,valor);
t_host = clock() - t_host;
double time_taken_host = ((double)t_host)/CLOCKS_PER_SEC;
printf("CPU: %f segons \n", time_taken_host);
if(res == TRUE)
printf("host: We found the number\n");
else
printf("host: We don't found the number :(\n");
int *dev_array , *dev_value , *dev_res;
cudaMalloc((void**)&dev_array, N*sizeof(int) );
cudaMalloc((void**)&dev_value, sizeof(int) );
cudaMalloc((void**)&dev_res, sizeof(int) );
res = FALSE;
cudaMemcpy(dev_array, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_value, &valor, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_res, &res, sizeof(int), cudaMemcpyHostToDevice);
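// Shrink the block size until it divides N exactly, so blocks * threads_block == N and
// every thread id maps to a valid array element (the kernel has no bounds check).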
int threads_block = MIN(512,N);
while(N%threads_block != 0)--threads_block;
int blocks = N / threads_block;
clock_t t_device = clock();
cerca_array_device<<<blocks,threads_block>>>(dev_array,dev_value,dev_res);
cudaMemcpy(&res, dev_res, sizeof(int), cudaMemcpyDeviceToHost);//Copy memory from device to host
t_device = clock() - t_device;
double time_taken_device = ((double)t_device)/CLOCKS_PER_SEC;
printf("GPU %f segons \n", time_taken_device);
cudaFree(dev_array);//free device mem
cudaFree(dev_value);
cudaFree(dev_res);
//Print whether the number was found
if(res == TRUE)
printf("device: We found the number\n");
else
printf("device: We don't found the number :(\n");
return 0;
}
|
6bdb03e5a04843da4ef0f3dd23b5394227c2cf41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <cstdlib>
using namespace std;
hipError_t addWithCuda(long long unsigned int liczba, bool *pierwsza);
__global__ void PierwszaCzyZlozona23(long long unsigned int *liczba, bool *pierwsza)
{
long long unsigned int index = threadIdx.x;
if (*liczba % (index + 2) == 0)
*pierwsza = false;
}
__global__ void PierwszaCzyZlozona(long long unsigned int *liczba, bool *pierwsza, long long unsigned int *przesuniecie)
{
long long unsigned int i = (threadIdx.x + blockDim.x*blockIdx.x + *przesuniecie) * 6;
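// Each thread tests one pair of candidate divisors of the form 6m+5 and 6m+7.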
if (*liczba % ((i + 5)) == 0) { *pierwsza = false; }
if (*liczba % ((i + 5) + 2) == 0) { *pierwsza = false; }
}
int main()
{
	// PRIMES
// 2^31-1 = 2147483647
// 2^61-1 = 2305843009213693951
	// COMPOSITES
// (2^31-1)^2 = 4611686014132420609
// (2^31-1)(2^13-1) = 17590038552577
unsigned long long int liczba = 0;
bool pierwsza = true;
time_t startCPU;
time_t stopCPU;
time_t startGPU;
time_t stopGPU;
cout << "Podaj liczbe" << endl;
cin >> liczba;
cout << "SPRAWDZANIE DLA CPU" << endl;
startCPU = clock();
if (liczba % 2 == 0) pierwsza = false;
else if (liczba % 3 == 0) pierwsza = false;
if (pierwsza)
for (unsigned long long int i = 5; i <= sqrt(liczba); i = i + 6) {
if (liczba % i == 0) { pierwsza = false; break; }
if (liczba % (i + 2) == 0) { pierwsza = false; break; }
}
if (pierwsza) {
cout << "Liczba pierwsza" << endl;
}
else {
cout << "Liczba zlozona" << endl;
}
stopCPU = clock();
double czasCPU = (double)(stopCPU - startCPU) / CLOCKS_PER_SEC;
cout << "Czas sprawdzania na CPU wynosi: " << czasCPU << endl;
pierwsza = true;
cout << "SPRAWDZANIE DLA GPU" << endl;
startGPU = clock();
hipError_t cudaStatus = addWithCuda(liczba, &pierwsza);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
char l;
cin >> l;
return 1;
}
if (pierwsza) {
cout << "Liczba pierwsza" << endl;
}
else {
cout << "Liczba zlozona" << endl;
}
stopGPU = clock();
double czasGPU = (double)(stopGPU - startGPU) / CLOCKS_PER_SEC;
cout << "Czas sprawdzania na GPU wynosi: " << czasGPU << endl;
double przyspieszenie = (double)(czasCPU / czasGPU);
cout << "Przyspieszenie na GPU wzgldem CPU: " << przyspieszenie << endl;
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
system("pause");
return 0;
}
hipError_t addWithCuda(long long unsigned int liczba, bool *pierwsza)
{
long long unsigned int *dev_liczba = 0;
bool *dev_pierwsza = 0;
long long unsigned int przesuniecie = 0;
long long unsigned int *dev_przesuniecie = 0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_liczba, sizeof(long long unsigned int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_pierwsza, sizeof(bool));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_liczba, &liczba, sizeof(long long unsigned int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_pierwsza, pierwsza, sizeof(bool), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
	// CHECK THE PATHOLOGICAL CASES (divisibility by 2 and 3)
PierwszaCzyZlozona23 << <1, 2 >> > (dev_liczba, dev_pierwsza);
	// Copy the flag back so the host actually sees the result of the divisibility-by-2/3 check before deciding to run the main loop
	cudaStatus = hipMemcpy(pierwsza, dev_pierwsza, sizeof(bool), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
		goto Error;
	}
	if (*pierwsza) {
		// PREPARE THE SPLIT INTO GRIDS OF BLOCKS AND BLOCKS OF THREADS
int ilosc_watkow_w_bloku = 1024;
int ilosc_blokow_w_siatce = 65535;
long long unsigned int ilosc_iteracji = (sqrt(liczba) + 1) / 6;
//cout << "ilosc iteracji " << ilosc_iteracji << endl;
long long unsigned int ilosc_pelnych_blokow = ilosc_iteracji / ilosc_watkow_w_bloku;
//cout << "ilosc_pelnych_blokow " << ilosc_pelnych_blokow << endl;
long long unsigned int ilosc_watkow_w_niepelnym_bloku = ilosc_iteracji % ilosc_watkow_w_bloku;
//cout << "ilosc_watkow_w_niepelnym_bloku " << ilosc_watkow_w_niepelnym_bloku << endl;
long long unsigned int ilosc_blokow = (ilosc_watkow_w_niepelnym_bloku == 0) ? ilosc_pelnych_blokow : ilosc_pelnych_blokow + 1;
//cout << "ilosc_blokow " << ilosc_blokow << endl;
long long unsigned int ilosc_pelnych_siatek = ilosc_blokow / ilosc_blokow_w_siatce;
//cout << "ilosc_pelnych_siatek " << ilosc_pelnych_siatek << endl;
long long unsigned int ilosc_blokow_w_niepelnej_siatce = ilosc_blokow % ilosc_blokow_w_siatce;
//cout << "ilosc_blokow_w_niepelnej_siatce " << ilosc_blokow_w_niepelnej_siatce << endl;
long long unsigned int ilosc_siatek = (ilosc_blokow_w_niepelnej_siatce == 0) ? ilosc_pelnych_siatek : ilosc_pelnych_siatek + 1;
//cout << "ilosc_siatek " << ilosc_siatek << endl;
cudaStatus = hipMalloc((void**)&dev_przesuniecie, sizeof(long long unsigned int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_przesuniecie, &przesuniecie, sizeof(long long unsigned int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
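		// Launch one grid at a time; before each launch the global thread offset of that grid is copied into dev_przesuniecie.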
for (long long unsigned int i = 0; i < ilosc_siatek; i++) {
przesuniecie = i * ilosc_blokow_w_siatce*ilosc_watkow_w_bloku;
cudaStatus = hipMemcpy(dev_przesuniecie, &przesuniecie, sizeof(long long unsigned int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
if (i == ilosc_siatek - 1) {
			// CASE: EXAMINE THE PARTIAL GRID MADE OF FULL BLOCKS
if (ilosc_blokow_w_niepelnej_siatce > 1) {
PierwszaCzyZlozona << <ilosc_blokow_w_niepelnej_siatce - 1, ilosc_watkow_w_bloku >> > (dev_liczba, dev_pierwsza, dev_przesuniecie);
				hipMemcpy(pierwsza, dev_pierwsza, sizeof(bool), hipMemcpyDeviceToHost); // refresh the host copy of the flag so the early exit can fire
				if (!*pierwsza)
					break;
przesuniecie += (ilosc_blokow_w_niepelnej_siatce - 1)*ilosc_watkow_w_bloku;
cudaStatus = hipMemcpy(dev_przesuniecie, &przesuniecie, sizeof(long long unsigned int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
}
			// CASE: EXAMINE THE PARTIAL BLOCK
PierwszaCzyZlozona << <1, ilosc_watkow_w_niepelnym_bloku >> > (dev_liczba, dev_pierwsza, dev_przesuniecie);
}
else
			// CASE: EXAMINE A FULL GRID MADE OF FULL BLOCKS
PierwszaCzyZlozona << <ilosc_blokow_w_siatce, ilosc_watkow_w_bloku >> > (dev_liczba, dev_pierwsza, dev_przesuniecie);
			hipMemcpy(pierwsza, dev_pierwsza, sizeof(bool), hipMemcpyDeviceToHost); // refresh the host copy of the flag so the early exit can fire
			if (!*pierwsza)
				break;
}
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(pierwsza, dev_pierwsza, sizeof(bool), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_liczba);
hipFree(dev_pierwsza);
hipFree(dev_przesuniecie);
return cudaStatus;
}
|
6bdb03e5a04843da4ef0f3dd23b5394227c2cf41.cu
|
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <cstdlib>
using namespace std;
cudaError_t addWithCuda(long long unsigned int liczba, bool *pierwsza);
__global__ void PierwszaCzyZlozona23(long long unsigned int *liczba, bool *pierwsza)
{
long long unsigned int index = threadIdx.x;
if (*liczba % (index + 2) == 0)
*pierwsza = false;
}
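// Each thread tests one pair of candidate divisors of the form 6k+5 and 6k+7; *przesuniecie shifts the thread index between successive grid launches.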
__global__ void PierwszaCzyZlozona(long long unsigned int *liczba, bool *pierwsza, long long unsigned int *przesuniecie)
{
long long unsigned int i = (threadIdx.x + blockDim.x*blockIdx.x + *przesuniecie) * 6;
if (*liczba % ((i + 5)) == 0) { *pierwsza = false; }
if (*liczba % ((i + 5) + 2) == 0) { *pierwsza = false; }
}
int main()
{
	// PRIMES
// 2^31-1 = 2147483647
// 2^61-1 = 2305843009213693951
	// COMPOSITES
// (2^31-1)^2 = 4611686014132420609
// (2^31-1)(2^13-1) = 17590038552577
unsigned long long int liczba = 0;
bool pierwsza = true;
time_t startCPU;
time_t stopCPU;
time_t startGPU;
time_t stopGPU;
cout << "Podaj liczbe" << endl;
cin >> liczba;
cout << "SPRAWDZANIE DLA CPU" << endl;
startCPU = clock();
if (liczba % 2 == 0) pierwsza = false;
else if (liczba % 3 == 0) pierwsza = false;
if (pierwsza)
for (unsigned long long int i = 5; i <= sqrt(liczba); i = i + 6) {
if (liczba % i == 0) { pierwsza = false; break; }
if (liczba % (i + 2) == 0) { pierwsza = false; break; }
}
if (pierwsza) {
cout << "Liczba pierwsza" << endl;
}
else {
cout << "Liczba zlozona" << endl;
}
stopCPU = clock();
double czasCPU = (double)(stopCPU - startCPU) / CLOCKS_PER_SEC;
cout << "Czas sprawdzania na CPU wynosi: " << czasCPU << endl;
pierwsza = true;
cout << "SPRAWDZANIE DLA GPU" << endl;
startGPU = clock();
cudaError_t cudaStatus = addWithCuda(liczba, &pierwsza);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
char l;
cin >> l;
return 1;
}
if (pierwsza) {
cout << "Liczba pierwsza" << endl;
}
else {
cout << "Liczba zlozona" << endl;
}
stopGPU = clock();
double czasGPU = (double)(stopGPU - startGPU) / CLOCKS_PER_SEC;
cout << "Czas sprawdzania na GPU wynosi: " << czasGPU << endl;
double przyspieszenie = (double)(czasCPU / czasGPU);
cout << "Przyspieszenie na GPU względem CPU: " << przyspieszenie << endl;
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
system("pause");
return 0;
}
cudaError_t addWithCuda(long long unsigned int liczba, bool *pierwsza)
{
long long unsigned int *dev_liczba = 0;
bool *dev_pierwsza = 0;
long long unsigned int przesuniecie = 0;
long long unsigned int *dev_przesuniecie = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_liczba, sizeof(long long unsigned int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_pierwsza, sizeof(bool));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_liczba, &liczba, sizeof(long long unsigned int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_pierwsza, pierwsza, sizeof(bool), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	// CHECK THE PATHOLOGICAL CASES (divisibility by 2 and 3)
PierwszaCzyZlozona23 << <1, 2 >> > (dev_liczba, dev_pierwsza);
	// Copy the flag back so the host actually sees the result of the divisibility-by-2/3 check before deciding to run the main loop
	cudaStatus = cudaMemcpy(pierwsza, dev_pierwsza, sizeof(bool), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	if (*pierwsza) {
		// PREPARE THE SPLIT INTO GRIDS OF BLOCKS AND BLOCKS OF THREADS
int ilosc_watkow_w_bloku = 1024;
int ilosc_blokow_w_siatce = 65535;
long long unsigned int ilosc_iteracji = (sqrt(liczba) + 1) / 6;
//cout << "ilosc iteracji " << ilosc_iteracji << endl;
long long unsigned int ilosc_pelnych_blokow = ilosc_iteracji / ilosc_watkow_w_bloku;
//cout << "ilosc_pelnych_blokow " << ilosc_pelnych_blokow << endl;
long long unsigned int ilosc_watkow_w_niepelnym_bloku = ilosc_iteracji % ilosc_watkow_w_bloku;
//cout << "ilosc_watkow_w_niepelnym_bloku " << ilosc_watkow_w_niepelnym_bloku << endl;
long long unsigned int ilosc_blokow = (ilosc_watkow_w_niepelnym_bloku == 0) ? ilosc_pelnych_blokow : ilosc_pelnych_blokow + 1;
//cout << "ilosc_blokow " << ilosc_blokow << endl;
long long unsigned int ilosc_pelnych_siatek = ilosc_blokow / ilosc_blokow_w_siatce;
//cout << "ilosc_pelnych_siatek " << ilosc_pelnych_siatek << endl;
long long unsigned int ilosc_blokow_w_niepelnej_siatce = ilosc_blokow % ilosc_blokow_w_siatce;
//cout << "ilosc_blokow_w_niepelnej_siatce " << ilosc_blokow_w_niepelnej_siatce << endl;
long long unsigned int ilosc_siatek = (ilosc_blokow_w_niepelnej_siatce == 0) ? ilosc_pelnych_siatek : ilosc_pelnych_siatek + 1;
//cout << "ilosc_siatek " << ilosc_siatek << endl;
cudaStatus = cudaMalloc((void**)&dev_przesuniecie, sizeof(long long unsigned int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_przesuniecie, &przesuniecie, sizeof(long long unsigned int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
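		// Launch one grid at a time; before each launch the global thread offset of that grid is copied into dev_przesuniecie.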
for (long long unsigned int i = 0; i < ilosc_siatek; i++) {
przesuniecie = i * ilosc_blokow_w_siatce*ilosc_watkow_w_bloku;
cudaStatus = cudaMemcpy(dev_przesuniecie, &przesuniecie, sizeof(long long unsigned int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
if (i == ilosc_siatek - 1) {
			// CASE: EXAMINE THE PARTIAL GRID MADE OF FULL BLOCKS
if (ilosc_blokow_w_niepelnej_siatce > 1) {
PierwszaCzyZlozona << <ilosc_blokow_w_niepelnej_siatce - 1, ilosc_watkow_w_bloku >> > (dev_liczba, dev_pierwsza, dev_przesuniecie);
				cudaMemcpy(pierwsza, dev_pierwsza, sizeof(bool), cudaMemcpyDeviceToHost); // refresh the host copy of the flag so the early exit can fire
				if (!*pierwsza)
					break;
przesuniecie += (ilosc_blokow_w_niepelnej_siatce - 1)*ilosc_watkow_w_bloku;
cudaStatus = cudaMemcpy(dev_przesuniecie, &przesuniecie, sizeof(long long unsigned int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
}
			// CASE: EXAMINE THE PARTIAL BLOCK
PierwszaCzyZlozona << <1, ilosc_watkow_w_niepelnym_bloku >> > (dev_liczba, dev_pierwsza, dev_przesuniecie);
}
else
			// CASE: EXAMINE A FULL GRID MADE OF FULL BLOCKS
PierwszaCzyZlozona << <ilosc_blokow_w_siatce, ilosc_watkow_w_bloku >> > (dev_liczba, dev_pierwsza, dev_przesuniecie);
			cudaMemcpy(pierwsza, dev_pierwsza, sizeof(bool), cudaMemcpyDeviceToHost); // refresh the host copy of the flag so the early exit can fire
			if (!*pierwsza)
				break;
}
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(pierwsza, dev_pierwsza, sizeof(bool), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_liczba);
cudaFree(dev_pierwsza);
cudaFree(dev_przesuniecie);
return cudaStatus;
}
|
91c8fd7078493c1247c2b213a79b1ff4ac4b2c4c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "normalize.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
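// Benchmark harness: for every matrix size and block configuration, launch the normalize kernel once, run 10 warm-up iterations, then time 1000 launches.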
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *nor_ary = NULL;
hipMalloc(&nor_ary, XSIZE*YSIZE*sizeof(float));
float *flit_ary = NULL;
hipMalloc(&flit_ary, XSIZE*YSIZE*sizeof(float));
float tw = 1;
float ts = 1;
size_t N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, nor_ary,flit_ary,tw,ts,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, nor_ary,flit_ary,tw,ts,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, nor_ary,flit_ary,tw,ts,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
91c8fd7078493c1247c2b213a79b1ff4ac4b2c4c.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "normalize.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *nor_ary = NULL;
cudaMalloc(&nor_ary, XSIZE*YSIZE*sizeof(float));
float *flit_ary = NULL;
cudaMalloc(&flit_ary, XSIZE*YSIZE*sizeof(float));
float tw = 1;
float ts = 1;
size_t N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
normalize<<<gridBlock,threadBlock>>>(nor_ary,flit_ary,tw,ts,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
normalize<<<gridBlock,threadBlock>>>(nor_ary,flit_ary,tw,ts,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
normalize<<<gridBlock,threadBlock>>>(nor_ary,flit_ary,tw,ts,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
e232af489568a5b517727ee066e34faaaffda971.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <common.h>
#include <perftimer.h>
#include <cfloat>
#include <difi.cuh>
#include <cutil_math.cuh>
#include <aabb3.h>
#include <boundingvolumetree3.h>
#include <thrust/tuple.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include <cmath>
#include <worldparameters.h>
#include <uniformgrid.cuh>
#include <ext_unit_tests.cuh>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <particleworld.hpp>
#include <hashgrid.hpp>
inline float frand()
{
return rand() / (float)RAND_MAX;
}
inline void cuErr(hipError_t status, const char *file, const int line)
{
if(status != hipSuccess) {
std::cerr << "Cuda API error: ";
std::cerr << hipGetErrorString(status);
std::cerr << " at line " << line;
std::cerr << " of file " << file << std::endl;
std::exit(EXIT_FAILURE);
}
}
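// Debug kernel: prints the hash-grid metadata stored in unified memory and fills the hash/index arrays with test values from the device side.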
__global__ void hashgrid_size(HashGrid<float,unified> *g)
{
printf("HashGrid size = %i\n",g->size_);
printf("HashGrid number of cells = %i\n",g->numCells_);
printf("Grid X Y Z = %i %i %i\n",g->gridx_, g->gridy_, g->gridz_);
printf("Cell size = [%f %f %f]\n" ,g->cellSize_.x
,g->cellSize_.y
,g->cellSize_.z);
printf("Origin = [%f %f %f]\n" ,g->origin_.x
,g->origin_.y
,g->origin_.z);
for(int i(0); i < g->size_; ++i)
{
g->hashEntries_[i]=20-i;
g->particleIndices_[i]=20-i;
printf("particleIndices_[%i]=%i\n",i, g->particleIndices_[i]);
}
}
__global__ void d_test_pwparams(ParticleWorld<float,unified> *w)
{
printf("Number of particles: %i\n",w->params_->numParticles_);
printf("Spring = %f\n",w->params_->spring_);
printf("Damping = %f\n",w->params_->damping_);
printf("Shear = %f\n",w->params_->shear_);
printf("Attraction = %f\n",w->params_->attraction_);
printf("Global dampening = %f\n",w->params_->globalDamping_);
printf("Particle radius = %f\n",w->params_->particleRadius_);
printf("Gravity = [%f %f %f]\n",w->params_->gravity_.x
,w->params_->gravity_.y
,w->params_->gravity_.z);
for(int i(0); i < w->size_*4; i+=4)
{
printf("pos[%i] = [%f %f %f]\n",i/4
,w->pos_[i]
,w->pos_[i+1]
,w->pos_[i+2]
);
printf("vel[%i] = [%f %f %f]\n",i/4
,w->vel_[i]
,w->vel_[i+1]
,w->vel_[i+2]
);
}
}
void test_pwparams(ParticleWorld<float,unified> *pw)
{
hipLaunchKernelGGL(( d_test_pwparams), dim3(1),dim3(1), 0, 0, pw);
hipDeviceSynchronize();
}
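// Place particles on a jittered cubic lattice; positions and velocities are stored as four floats per particle (x, y, z, w).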
void test_initGrid(unsigned *size, float spacing, float jitter, ParticleWorld<float, unified> &pw)
{
unsigned numParticles = pw.size_;
float radius = pw.params_->particleRadius_;
srand(1973);
for (unsigned z = 0; z<size[2]; z++) {
for (unsigned y = 0; y<size[1]; y++) {
for (unsigned x = 0; x<size[0]; x++) {
unsigned i = (z*size[1] * size[0]) + (y*size[0]) + x;
if (i < numParticles) {
pw.pos_[i * 4] = (spacing * x) + radius - 1.0f + (frand()*2.0f - 1.0f)*jitter;
pw.pos_[i * 4 + 1] = (spacing * y) + radius - 1.0f + (frand()*2.0f - 1.0f)*jitter;
pw.pos_[i * 4 + 2] = (spacing * z) + radius - 1.0f + (frand()*2.0f - 1.0f)*jitter;
pw.pos_[i * 4 + 3] = 1.0f;
pw.vel_[i * 4] = 0.0f;
pw.vel_[i * 4 + 1] = 0.0f;
pw.vel_[i * 4 + 2] = 0.0f;
pw.vel_[i * 4 + 3] = 0.0f;
}
}
}
}
}
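// hipFree(nullptr) is a no-op that should succeed; after that check, build a small particle world and hash grid in unified memory and print them from the device.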
void test_cudaFree()
{
int *test = nullptr;
cudaCheck(hipFree(test));
hipDeviceSynchronize();
int _size = 10;
ParticleWorld<float,unified> *myWorld = new ParticleWorld<float,unified>(_size);
myWorld->size_ = _size;
myWorld->params_->numParticles_ = 10;
myWorld->params_->spring_ = 0.5f;
myWorld->params_->damping_ = 0.02f;
myWorld->params_->shear_ = 0.1f;
myWorld->params_->attraction_ = 0.0f;
myWorld->params_->gravity_ = Vector3<float>(0, 0, -0.0003f);
myWorld->params_->globalDamping_ = 1.0f;
myWorld->params_->boundaryDamping_ = -0.5f;
myWorld->params_->particleRadius_ = 1.0f / 64.0f;
myWorld->params_->origin_ = Vector3<float>(-1.0f, -1.0f, -1.0f);
myWorld->params_->gridx_ = 64;
myWorld->params_->gridy_ = 64;
myWorld->params_->gridz_ = 64;
myWorld->params_->timeStep_ = 0.5f;
float jitter = myWorld->params_->particleRadius_ * 0.01f;
unsigned int s = (int)::ceil(::pow((float)myWorld->size_, 1.0f / 3.0f));
unsigned int gridSize[3];
gridSize[0] = gridSize[1] = gridSize[2] = s;
test_initGrid(gridSize, myWorld->params_->particleRadius_*2.0f, jitter, *myWorld);
float diameter = 2.0f * myWorld->params_->particleRadius_;
Vector3<float> cellS(diameter, diameter, diameter);
HashGrid<float,unified> *grid = new HashGrid<float,unified>(_size,
myWorld->params_->gridx_,
myWorld->params_->gridy_,
myWorld->params_->gridz_,
cellS,
myWorld->params_->origin_
);
test_pwparams(myWorld);
hipLaunchKernelGGL(( hashgrid_size), dim3(1),dim3(1), 0, 0, grid);
hipDeviceSynchronize();
delete myWorld;
delete grid;
}
|
e232af489568a5b517727ee066e34faaaffda971.cu
|
#include <stdio.h>
#include <common.h>
#include <perftimer.h>
#include <cfloat>
#include <difi.cuh>
#include <cutil_math.cuh>
#include <aabb3.h>
#include <boundingvolumetree3.h>
#include <thrust/tuple.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include <cmath>
#include <worldparameters.h>
#include <uniformgrid.cuh>
#include <ext_unit_tests.cuh>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <particleworld.hpp>
#include <hashgrid.hpp>
inline float frand()
{
return rand() / (float)RAND_MAX;
}
inline void cuErr(cudaError_t status, const char *file, const int line)
{
if(status != cudaSuccess) {
std::cerr << "Cuda API error: ";
std::cerr << cudaGetErrorString(status);
std::cerr << " at line " << line;
std::cerr << " of file " << file << std::endl;
std::exit(EXIT_FAILURE);
}
}
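// Debug kernel: prints the hash-grid metadata stored in unified memory and fills the hash/index arrays with test values from the device side.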
__global__ void hashgrid_size(HashGrid<float,unified> *g)
{
printf("HashGrid size = %i\n",g->size_);
printf("HashGrid number of cells = %i\n",g->numCells_);
printf("Grid X Y Z = %i %i %i\n",g->gridx_, g->gridy_, g->gridz_);
printf("Cell size = [%f %f %f]\n" ,g->cellSize_.x
,g->cellSize_.y
,g->cellSize_.z);
printf("Origin = [%f %f %f]\n" ,g->origin_.x
,g->origin_.y
,g->origin_.z);
for(int i(0); i < g->size_; ++i)
{
g->hashEntries_[i]=20-i;
g->particleIndices_[i]=20-i;
printf("particleIndices_[%i]=%i\n",i, g->particleIndices_[i]);
}
}
__global__ void d_test_pwparams(ParticleWorld<float,unified> *w)
{
printf("Number of particles: %i\n",w->params_->numParticles_);
printf("Spring = %f\n",w->params_->spring_);
printf("Damping = %f\n",w->params_->damping_);
printf("Shear = %f\n",w->params_->shear_);
printf("Attraction = %f\n",w->params_->attraction_);
printf("Global dampening = %f\n",w->params_->globalDamping_);
printf("Particle radius = %f\n",w->params_->particleRadius_);
printf("Gravity = [%f %f %f]\n",w->params_->gravity_.x
,w->params_->gravity_.y
,w->params_->gravity_.z);
for(int i(0); i < w->size_*4; i+=4)
{
printf("pos[%i] = [%f %f %f]\n",i/4
,w->pos_[i]
,w->pos_[i+1]
,w->pos_[i+2]
);
printf("vel[%i] = [%f %f %f]\n",i/4
,w->vel_[i]
,w->vel_[i+1]
,w->vel_[i+2]
);
}
}
void test_pwparams(ParticleWorld<float,unified> *pw)
{
d_test_pwparams<<<1,1>>>(pw);
cudaDeviceSynchronize();
}
void test_initGrid(unsigned *size, float spacing, float jitter, ParticleWorld<float, unified> &pw)
{
unsigned numParticles = pw.size_;
float radius = pw.params_->particleRadius_;
srand(1973);
for (unsigned z = 0; z<size[2]; z++) {
for (unsigned y = 0; y<size[1]; y++) {
for (unsigned x = 0; x<size[0]; x++) {
unsigned i = (z*size[1] * size[0]) + (y*size[0]) + x;
if (i < numParticles) {
pw.pos_[i * 4] = (spacing * x) + radius - 1.0f + (frand()*2.0f - 1.0f)*jitter;
pw.pos_[i * 4 + 1] = (spacing * y) + radius - 1.0f + (frand()*2.0f - 1.0f)*jitter;
pw.pos_[i * 4 + 2] = (spacing * z) + radius - 1.0f + (frand()*2.0f - 1.0f)*jitter;
pw.pos_[i * 4 + 3] = 1.0f;
pw.vel_[i * 4] = 0.0f;
pw.vel_[i * 4 + 1] = 0.0f;
pw.vel_[i * 4 + 2] = 0.0f;
pw.vel_[i * 4 + 3] = 0.0f;
}
}
}
}
}
void test_cudaFree()
{
int *test = nullptr;
cudaCheck(cudaFree(test));
cudaDeviceSynchronize();
int _size = 10;
ParticleWorld<float,unified> *myWorld = new ParticleWorld<float,unified>(_size);
myWorld->size_ = _size;
myWorld->params_->numParticles_ = 10;
myWorld->params_->spring_ = 0.5f;
myWorld->params_->damping_ = 0.02f;
myWorld->params_->shear_ = 0.1f;
myWorld->params_->attraction_ = 0.0f;
myWorld->params_->gravity_ = Vector3<float>(0, 0, -0.0003f);
myWorld->params_->globalDamping_ = 1.0f;
myWorld->params_->boundaryDamping_ = -0.5f;
myWorld->params_->particleRadius_ = 1.0f / 64.0f;
myWorld->params_->origin_ = Vector3<float>(-1.0f, -1.0f, -1.0f);
myWorld->params_->gridx_ = 64;
myWorld->params_->gridy_ = 64;
myWorld->params_->gridz_ = 64;
myWorld->params_->timeStep_ = 0.5f;
float jitter = myWorld->params_->particleRadius_ * 0.01f;
unsigned int s = (int)std::ceil(std::pow((float)myWorld->size_, 1.0f / 3.0f));
unsigned int gridSize[3];
gridSize[0] = gridSize[1] = gridSize[2] = s;
test_initGrid(gridSize, myWorld->params_->particleRadius_*2.0f, jitter, *myWorld);
float diameter = 2.0f * myWorld->params_->particleRadius_;
Vector3<float> cellS(diameter, diameter, diameter);
HashGrid<float,unified> *grid = new HashGrid<float,unified>(_size,
myWorld->params_->gridx_,
myWorld->params_->gridy_,
myWorld->params_->gridz_,
cellS,
myWorld->params_->origin_
);
test_pwparams(myWorld);
hashgrid_size<<<1,1>>>(grid);
cudaDeviceSynchronize();
delete myWorld;
delete grid;
}
|
fde1376a6fb82a96b3fa457ceacc80c9f16988ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <primitiv/config.h>
#include <random>
#include <primitiv/core/error.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/internal/cuda/utils.h>
namespace primitiv {
namespace devices {
std::uint32_t CUDA::num_devices() {
int ret;
CUDA_CALL(::hipGetDeviceCount(&ret));
return ret;
}
void CUDA::assert_support(std::uint32_t device_id) {
if (device_id >= num_devices()) {
PRIMITIV_THROW_ERROR("Invalid device ID: " << device_id);
}
::hipDeviceProp_t prop;
CUDA_CALL(::hipGetDeviceProperties(&prop, device_id));
// Checks compute capability
static const int MIN_CC_MAJOR = 3;
static const int MIN_CC_MINOR = 0;
if (prop.major < MIN_CC_MAJOR ||
(prop.major == MIN_CC_MAJOR && prop.minor < MIN_CC_MINOR)) {
PRIMITIV_THROW_ERROR(
"CUDA Device " << device_id << " does not satisfy the "
"minimum requirement of the compute capability: "
<< prop.major << '.' << prop.minor << " < "
<< MIN_CC_MAJOR << '.' << MIN_CC_MINOR);
}
// Checks other minimum requirements.
#define CHECK_REQUIREMENT(name, value) \
{ \
if (prop.name < (value)) { \
PRIMITIV_THROW_ERROR( \
"CUDA Device " << device_id \
<< " does not satisfy the minimum requirement by primitiv. " \
<< "property: " << #name << ", " \
<< "value: " << prop.name << ", " \
<< "required at least: " << (value)); \
} \
}
#define CHECK_REQUIREMENT_VECTOR(name, index, value) \
{ \
if (prop.name[index] < (value)) { \
PRIMITIV_THROW_ERROR( \
"CUDA Device " << device_id \
<< " does not satisfy the minimum requirement by primitiv. " \
<< "property: " << #name << "[" << #index << "], " \
<< "value: " << prop.name[index] << ", " \
<< "required at least: " << (value)); \
} \
}
CHECK_REQUIREMENT(totalGlobalMem, 1ull * (1ull << 30));
CHECK_REQUIREMENT(sharedMemPerBlock, 16ull * (1ull << 10));
CHECK_REQUIREMENT(maxThreadsPerBlock, 256);
CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 0, 256);
CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 1, 16);
CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 2, 1);
CHECK_REQUIREMENT_VECTOR(maxGridSize, 0, 32767);
CHECK_REQUIREMENT_VECTOR(maxGridSize, 1, 32767);
CHECK_REQUIREMENT_VECTOR(maxGridSize, 2, 32767);
#undef CHECK_REQUIREMENT
#undef CHECK_REQUIREMENT_VECTOR
}
void CUDA::initialize() {
assert_support(dev_id_);
// Retrieves device properties.
::hipDeviceProp_t prop;
CUDA_CALL(::hipGetDeviceProperties(&prop, dev_id_));
// Calculates size of dims to be used in CUDA kernels.
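  // dim1_x_ doubles while it stays below both 1024 and maxThreadsPerBlock; dim2_x_ and dim2_y_ then split the same thread count into a near-square 2-D block (dim2_x_ >= dim2_y_, dim2_x_ * dim2_y_ == dim1_x_).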
dim1_x_ = 1;
while (dim1_x_ < 1024 &&
dim1_x_ < static_cast<std::uint32_t>(prop.maxThreadsPerBlock)) {
dim1_x_ <<= 1;
}
dim2_y_ = dim1_x_;
dim2_x_ = 1;
while (dim2_x_ < dim2_y_) {
dim2_x_ <<= 1;
dim2_y_ >>= 1;
}
max_batch_ = prop.maxGridSize[1];
// Initializes additional libraries
state_.reset(new cuda::InternalState(dev_id_, rng_seed_));
state_->prop = prop;
// Initializes the device pointer for integer IDs.
ids_ptr_ = state_->pool.allocate(sizeof(std::uint32_t) * max_batch_);
}
CUDA::CUDA(std::uint32_t device_id, std::uint32_t rng_seed)
: dev_id_(device_id)
, rng_seed_(rng_seed) {
initialize();
}
CUDA::CUDA(std::uint32_t device_id)
: CUDA(device_id, std::random_device()()) {}
CUDA::~CUDA() {
// Nothing to do for now.
}
} // namespace devices
} // namespace primitiv
|
fde1376a6fb82a96b3fa457ceacc80c9f16988ce.cu
|
#include <primitiv/config.h>
#include <random>
#include <primitiv/core/error.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/internal/cuda/utils.h>
namespace primitiv {
namespace devices {
std::uint32_t CUDA::num_devices() {
int ret;
CUDA_CALL(::cudaGetDeviceCount(&ret));
return ret;
}
void CUDA::assert_support(std::uint32_t device_id) {
if (device_id >= num_devices()) {
PRIMITIV_THROW_ERROR("Invalid device ID: " << device_id);
}
::cudaDeviceProp prop;
CUDA_CALL(::cudaGetDeviceProperties(&prop, device_id));
// Checks compute capability
static const int MIN_CC_MAJOR = 3;
static const int MIN_CC_MINOR = 0;
if (prop.major < MIN_CC_MAJOR ||
(prop.major == MIN_CC_MAJOR && prop.minor < MIN_CC_MINOR)) {
PRIMITIV_THROW_ERROR(
"CUDA Device " << device_id << " does not satisfy the "
"minimum requirement of the compute capability: "
<< prop.major << '.' << prop.minor << " < "
<< MIN_CC_MAJOR << '.' << MIN_CC_MINOR);
}
// Checks other minimum requirements.
#define CHECK_REQUIREMENT(name, value) \
{ \
if (prop.name < (value)) { \
PRIMITIV_THROW_ERROR( \
"CUDA Device " << device_id \
<< " does not satisfy the minimum requirement by primitiv. " \
<< "property: " << #name << ", " \
<< "value: " << prop.name << ", " \
<< "required at least: " << (value)); \
} \
}
#define CHECK_REQUIREMENT_VECTOR(name, index, value) \
{ \
if (prop.name[index] < (value)) { \
PRIMITIV_THROW_ERROR( \
"CUDA Device " << device_id \
<< " does not satisfy the minimum requirement by primitiv. " \
<< "property: " << #name << "[" << #index << "], " \
<< "value: " << prop.name[index] << ", " \
<< "required at least: " << (value)); \
} \
}
CHECK_REQUIREMENT(totalGlobalMem, 1ull * (1ull << 30));
CHECK_REQUIREMENT(sharedMemPerBlock, 16ull * (1ull << 10));
CHECK_REQUIREMENT(maxThreadsPerBlock, 256);
CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 0, 256);
CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 1, 16);
CHECK_REQUIREMENT_VECTOR(maxThreadsDim, 2, 1);
CHECK_REQUIREMENT_VECTOR(maxGridSize, 0, 32767);
CHECK_REQUIREMENT_VECTOR(maxGridSize, 1, 32767);
CHECK_REQUIREMENT_VECTOR(maxGridSize, 2, 32767);
#undef CHECK_REQUIREMENT
#undef CHECK_REQUIREMENT_VECTOR
}
void CUDA::initialize() {
assert_support(dev_id_);
// Retrieves device properties.
::cudaDeviceProp prop;
CUDA_CALL(::cudaGetDeviceProperties(&prop, dev_id_));
// Calculates size of dims to be used in CUDA kernels.
dim1_x_ = 1;
while (dim1_x_ < 1024 &&
dim1_x_ < static_cast<std::uint32_t>(prop.maxThreadsPerBlock)) {
dim1_x_ <<= 1;
}
dim2_y_ = dim1_x_;
dim2_x_ = 1;
while (dim2_x_ < dim2_y_) {
dim2_x_ <<= 1;
dim2_y_ >>= 1;
}
max_batch_ = prop.maxGridSize[1];
// Initializes additional libraries
state_.reset(new cuda::InternalState(dev_id_, rng_seed_));
state_->prop = prop;
// Initializes the device pointer for integer IDs.
ids_ptr_ = state_->pool.allocate(sizeof(std::uint32_t) * max_batch_);
}
CUDA::CUDA(std::uint32_t device_id, std::uint32_t rng_seed)
: dev_id_(device_id)
, rng_seed_(rng_seed) {
initialize();
}
CUDA::CUDA(std::uint32_t device_id)
: CUDA(device_id, std::random_device()()) {}
CUDA::~CUDA() {
// Nothing to do for now.
}
} // namespace devices
} // namespace primitiv
|
10b715683a4fcf461114bc1b75867292b795a5ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _jkcudachess_KERNEL_H_
#define _jkcudachess_KERNEL_H_
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
__global__ void testKernel( float* g_idata, float* g_odata)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
// use the bank checker macro to check for bank conflicts during host
// emulation
SDATA(tid) = g_idata[tid];
__syncthreads();
// perform some computations
SDATA(tid) = (float) num_threads * SDATA( tid);
__syncthreads();
// write data to global memory
g_odata[tid] = SDATA(tid);
}
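// Element-wise vector addition: each thread computes one output element from its global index.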
__global__ void vecAdd(float* A,float* B,float* C)
{
int i =blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B [i];
}
#endif // #ifndef _jkcudachess_KERNEL_H_
|
10b715683a4fcf461114bc1b75867292b795a5ad.cu
|
#ifndef _jkcudachess_KERNEL_H_
#define _jkcudachess_KERNEL_H_
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
__global__ void testKernel( float* g_idata, float* g_odata)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
// use the bank checker macro to check for bank conflicts during host
// emulation
SDATA(tid) = g_idata[tid];
__syncthreads();
// perform some computations
SDATA(tid) = (float) num_threads * SDATA( tid);
__syncthreads();
// write data to global memory
g_odata[tid] = SDATA(tid);
}
__global__ void vecAdd(float* A,float* B,float* C)
{
int i =blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B [i];
}
#endif // #ifndef _jkcudachess_KERNEL_H_
|
0b255678189d6e88d5fe11e8490a3208a34066b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/concurrent_vector.h"
#include "utility.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/system/hip/experimental/pinned_allocator.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include "csv.hpp"
#include "timer.h"
using namespace tbb;
using namespace std;
std::vector<string> timestamp;
#define min(a, b) (a < b ? a : b)
typedef long mytype;
typedef thrust::host_vector<mytype, thrust::hip::experimental::pinned_allocator<mytype> > pinnedVector1;
typedef thrust::host_vector<mytype, thrust::hip::experimental::pinned_allocator<mytype> > pinnedVector2;
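// Page-locked (pinned) host vectors are required for hipMemcpyAsync to overlap the two host-to-device copies issued on separate streams below.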
#define TV1 1
#define TV2 2
int main(int argc, char** argv) {
int N = atoi(argv[2]);
unsigned int t, travdirtime;
hipSetDevice(0);
pinnedVector1 hi1(N);
pinnedVector2 ho1(N);
hipSetDevice(1);
pinnedVector1 hi2(N);
pinnedVector2 ho2(N);
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
long size=atoi(argv[2]);
for (unsigned int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string tms = rec[0];
std::string bytes = rec[20];
for(size_t c = tms.find_first_of("\""); c != string::npos; c = tms.find_first_of("\"")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("/"); c != string::npos; c = tms.find_first_of("/")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("."); c != string::npos; c = tms.find_first_of(".")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(" "); c != string::npos; c = tms.find_first_of(" ")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(":"); c != string::npos; c = tms.find_first_of(":")){
tms.erase(c,1);
}
for(size_t c = bytes.find_first_of("\""); c != string::npos; c = bytes.find_first_of("\"")){
bytes.erase(c,1);
}
hi1.push_back(stol(tms));
hi2.push_back(stol(tms));
ho1.push_back(stol(bytes));
ho2.push_back(stol(bytes));
}
hipSetDevice(0);
thrust::device_vector<mytype> di1(N);
thrust::device_vector<mytype> do1(N);
hipSetDevice(1);
thrust::device_vector<mytype> di2(N);
thrust::device_vector<mytype> do2(N);
hipSetDevice(0);
thrust::fill(hi1.begin(), hi1.end(), TV1);
thrust::sequence(do1.begin(), do1.end());
hipSetDevice(1);
thrust::fill(hi2.begin(), hi2.end(), TV2);
thrust::sequence(do2.begin(), do2.end());
hipStream_t s1, s2;
hipSetDevice(0);
hipStreamCreate(&s1);
hipSetDevice(1);
hipStreamCreate(&s2);
start_timer(&t);
hipSetDevice(0);
hipMemcpyAsync(thrust::raw_pointer_cast(di1.data()), thrust::raw_pointer_cast(hi1.data()), di1.size()*sizeof(mytype), hipMemcpyHostToDevice, s1);
hipMemcpyAsync(thrust::raw_pointer_cast(di2.data()), thrust::raw_pointer_cast(hi2.data()), di2.size()*sizeof(mytype), hipMemcpyHostToDevice, s2);
hipDeviceSynchronize();
travdirtime = stop_timer(&t);
print_timer(travdirtime);
}
|
0b255678189d6e88d5fe11e8490a3208a34066b4.cu
|
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/concurrent_vector.h"
#include "utility.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include "csv.hpp"
#include "timer.h"
using namespace tbb;
using namespace std;
std::vector<string> timestamp;
#define min(a, b) (a < b ? a : b)
typedef long mytype;
typedef thrust::host_vector<mytype, thrust::cuda::experimental::pinned_allocator<mytype> > pinnedVector1;
typedef thrust::host_vector<mytype, thrust::cuda::experimental::pinned_allocator<mytype> > pinnedVector2;
#define TV1 1
#define TV2 2
int main(int argc, char** argv) {
int N = atoi(argv[2]);
unsigned int t, travdirtime;
cudaSetDevice(0);
pinnedVector1 hi1(N);
pinnedVector2 ho1(N);
cudaSetDevice(1);
pinnedVector1 hi2(N);
pinnedVector2 ho2(N);
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
long size=atoi(argv[2]);
for (unsigned int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string tms = rec[0];
std::string bytes = rec[20];
for(size_t c = tms.find_first_of("\""); c != string::npos; c = tms.find_first_of("\"")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("/"); c != string::npos; c = tms.find_first_of("/")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("."); c != string::npos; c = tms.find_first_of(".")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(" "); c != string::npos; c = tms.find_first_of(" ")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(":"); c != string::npos; c = tms.find_first_of(":")){
tms.erase(c,1);
}
for(size_t c = bytes.find_first_of("\""); c != string::npos; c = bytes.find_first_of("\"")){
bytes.erase(c,1);
}
hi1.push_back(stol(tms));
hi2.push_back(stol(tms));
ho1.push_back(stol(bytes));
ho2.push_back(stol(bytes));
}
cudaSetDevice(0);
thrust::device_vector<mytype> di1(N);
thrust::device_vector<mytype> do1(N);
cudaSetDevice(1);
thrust::device_vector<mytype> di2(N);
thrust::device_vector<mytype> do2(N);
cudaSetDevice(0);
thrust::fill(hi1.begin(), hi1.end(), TV1);
thrust::sequence(do1.begin(), do1.end());
cudaSetDevice(1);
thrust::fill(hi2.begin(), hi2.end(), TV2);
thrust::sequence(do2.begin(), do2.end());
cudaStream_t s1, s2;
cudaSetDevice(0);
cudaStreamCreate(&s1);
cudaSetDevice(1);
cudaStreamCreate(&s2);
start_timer(&t);
cudaSetDevice(0);
cudaMemcpyAsync(thrust::raw_pointer_cast(di1.data()), thrust::raw_pointer_cast(hi1.data()), di1.size()*sizeof(mytype), cudaMemcpyHostToDevice, s1);
cudaMemcpyAsync(thrust::raw_pointer_cast(di2.data()), thrust::raw_pointer_cast(hi2.data()), di2.size()*sizeof(mytype), cudaMemcpyHostToDevice, s2);
cudaDeviceSynchronize();
travdirtime = stop_timer(&t);
print_timer(travdirtime);
}
|
77035a935da11fab97be4a814ff988e901cc9c10.hip
|
// !!! This is a file automatically generated by hipify!!!
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
When using this code in a scientific project, please cite one or all of the
following papers:
   * Daniel Ruijters and Philippe Thévenaz,
GPU Prefilter for Accurate Cubic B-Spline Interpolation,
The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012.
http://dannyruijters.nl/docs/cudaPrefilter3.pdf
* Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens,
Efficient GPU-Based Texture Interpolation using Uniform B-Splines,
Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008.
\*--------------------------------------------------------------------------*/
#ifndef _2D_CUBIC_BSPLINE_PREFILTER_H_
#define _2D_CUBIC_BSPLINE_PREFILTER_H_
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_math.h"
#include <stdio.h>
//#include <cutil.h>
#include "cubicPrefilter_kernel.cu"
/*
#define b0 = 1.732176555412859f;
#define b1 = -0.464135309171000f;
#define b2 = 0.124364681271139f;
#define b3 = -0.033323415913556f;
#define b4 = 0.008928982383084f;
#define b5 = -0.002392513618779f;
#define b6 = 0.000641072092032f;
#define b7 = -0.000171774749350f;
*/
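// Truncated prefilter taps b0..b7: the recursive cubic B-spline prefilter is approximated below by a symmetric 15-tap FIR filter built from these coefficients.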
__constant__ float BSplinePreFilter[8] = {
1.732176555412859f, //b0
-0.464135309171000f, //b1
0.124364681271139f,
-0.033323415913556f,
0.008928982383084f,
-0.002392513618779f,
0.000641072092032f,
-0.000171774749350f, //b7
};
// ***************************************************************************
// * Global GPU procedures
// ***************************************************************************
template<class floatN>
__global__ void SamplesToCoefficients2DX(
floatN* image, // in-place processing
uint pitch, // width in bytes
uint width, // width of the image
uint height) // height of the image
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
floatN* line = (floatN*)((uchar*)image + y * pitch); //direct access
ConvertToInterpolationCoefficients(line, width, sizeof(floatN));
}
template<class floatN>
__global__ void SamplesToCoefficients2DY(
floatN* image, // in-place processing
uint pitch, // width in bytes
uint width, // width of the image
uint height) // height of the image
{
// process lines in x-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
floatN* line = image + x; //direct access
ConvertToInterpolationCoefficients(line, height, pitch);
}
//Kernel
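// Each 256-thread block loads 256 samples into shared memory (a 7-sample halo on each side, clamped at the image border) and writes 242 = 256 - 14 filtered outputs.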
template<class floatN>
__global__ void FIR_2D_X(
floatN* image, // in-place processing
floatN* coeffi, // in-place processing
uint width, // width of the image
uint height) {
const int start_x = blockIdx.x * (blockDim.x - 14);
const int tid_y = blockIdx.y * blockDim.y;
__shared__ float IMG[256];
//
int cache_x = start_x + threadIdx.x - 7;
cache_x = cache_x < 0 ? 0 : cache_x;
cache_x = cache_x > width - 1 ? width - 1 : cache_x;
IMG[threadIdx.x] = image[tid_y*width + cache_x];
__syncthreads();
int tid_x = blockIdx.x * (blockDim.x - 14) + threadIdx.x;
if (threadIdx.x < blockDim.x - 14 && tid_x < width) {
coeffi[tid_y*width + tid_x] =
BSplinePreFilter[7] * (IMG[threadIdx.x + 0] + IMG[threadIdx.x + 14]) +
BSplinePreFilter[6] * (IMG[threadIdx.x + 1] + IMG[threadIdx.x + 13]) +
BSplinePreFilter[5] * (IMG[threadIdx.x + 2] + IMG[threadIdx.x + 12]) +
BSplinePreFilter[4] * (IMG[threadIdx.x + 3] + IMG[threadIdx.x + 11]) +
BSplinePreFilter[3] * (IMG[threadIdx.x + 4] + IMG[threadIdx.x + 10]) +
BSplinePreFilter[2] * (IMG[threadIdx.x + 5] + IMG[threadIdx.x + 9]) +
BSplinePreFilter[1] * (IMG[threadIdx.x + 6] + IMG[threadIdx.x + 8]) +
BSplinePreFilter[0] * (IMG[threadIdx.x + 7])
;
}
}
template<class floatN>
__global__ void FIR_2D_Y(
floatN* image, // in-place processing
floatN* coeffi, // in-place processing
uint width, // width of the image
uint height) {
const int start_y = blockIdx.x * (blockDim.x - 14);
const int tid_x = blockIdx.y * blockDim.y;
__shared__ float IMG[256];
//
int cache_y = start_y + threadIdx.x - 7;
cache_y = cache_y < 0 ? 0 : cache_y;
cache_y = cache_y > height - 1 ? height - 1 : cache_y;
IMG[threadIdx.x] = image[cache_y*width + tid_x];
__syncthreads();
int tid_y = blockIdx.x * (blockDim.x - 14) + threadIdx.x;
if (threadIdx.x < blockDim.x - 14 && tid_y < height) {
coeffi[tid_y*width + tid_x] =
BSplinePreFilter[7] * (IMG[threadIdx.x + 0] + IMG[threadIdx.x + 14]) +
BSplinePreFilter[6] * (IMG[threadIdx.x + 1] + IMG[threadIdx.x + 13]) +
BSplinePreFilter[5] * (IMG[threadIdx.x + 2] + IMG[threadIdx.x + 12]) +
BSplinePreFilter[4] * (IMG[threadIdx.x + 3] + IMG[threadIdx.x + 11]) +
BSplinePreFilter[3] * (IMG[threadIdx.x + 4] + IMG[threadIdx.x + 10]) +
BSplinePreFilter[2] * (IMG[threadIdx.x + 5] + IMG[threadIdx.x + 9]) +
BSplinePreFilter[1] * (IMG[threadIdx.x + 6] + IMG[threadIdx.x + 8]) +
BSplinePreFilter[0] * (IMG[threadIdx.x + 7])
;
}
}
// ***************************************************************************
// * Exported functions
// ***************************************************************************
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param pitch width in bytes (including padding bytes)
//! @param width image width in number of pixels
//! @param height image height in number of pixels
template<class floatN>
extern void CubicBSplinePrefilter2D(floatN* image, uint pitch, uint width, uint height)
{
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
SamplesToCoefficients2DX<floatN> << <dimGridX, dimBlockX >> >(image, pitch, width, height);
//CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
SamplesToCoefficients2DY<floatN> << <dimGridY, dimBlockY >> >(image, pitch, width, height);
//CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
}
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param pitch width in bytes (including padding bytes)
//! @param width image width in number of pixels
//! @param height image height in number of pixels
//! @note Prints stopwatch feedback
template<class floatN>
extern void CubicBSplinePrefilter2DTimer(floatN* image, uint pitch, uint width, uint height)
{
printf("\nCubic B-Spline Prefilter timer:\n");
unsigned int hTimer;
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
SamplesToCoefficients2DX<floatN> << <dimGridX, dimBlockX >> >(image, pitch, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueX = cutGetTimerValue(hTimer);
printf("x-direction : %f msec\n", timerValueX);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
SamplesToCoefficients2DY<floatN> << <dimGridY, dimBlockY >> >(image, pitch, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueY = cutGetTimerValue(hTimer);
printf("y-direction : %f msec\n", timerValueY);
printf("total : %f msec\n\n", timerValueX + timerValueY);
}
//FIR
template<class floatN>
extern void CubicBSplinePrefilter2D_FIR(floatN* image , uint pitch, uint width, uint height)
{
floatN* coeffi_tmp;
hipMalloc((void**)&coeffi_tmp, sizeof(floatN)*width*height);
//interpolation
uint Grid_Width = (width + 241) / 242;
uint Grid_Height = height;
dim3 DimBlock(256, 1);
dim3 DimGrid(Grid_Width, Grid_Height);
FIR_2D_X<floatN> << <DimGrid, DimBlock >> > (image, coeffi_tmp, width, height);
Grid_Width = (height + 241) / 242;
Grid_Height = width;
DimGrid = dim3(Grid_Width, Grid_Height);
FIR_2D_Y<floatN> << <DimGrid, DimBlock >> > (coeffi_tmp, image, width, height);
hipFree(coeffi_tmp);
}
#endif //_2D_CUBIC_BSPLINE_PREFILTER_H_
|
77035a935da11fab97be4a814ff988e901cc9c10.cu
|
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
When using this code in a scientific project, please cite one or all of the
following papers:
   * Daniel Ruijters and Philippe Thévenaz,
GPU Prefilter for Accurate Cubic B-Spline Interpolation,
The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012.
http://dannyruijters.nl/docs/cudaPrefilter3.pdf
* Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens,
Efficient GPU-Based Texture Interpolation using Uniform B-Splines,
Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008.
\*--------------------------------------------------------------------------*/
#ifndef _2D_CUBIC_BSPLINE_PREFILTER_H_
#define _2D_CUBIC_BSPLINE_PREFILTER_H_
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_math.h"
#include <stdio.h>
//#include <cutil.h>
#include "cubicPrefilter_kernel.cu"
/*
#define b0 = 1.732176555412859f;
#define b1 = -0.464135309171000f;
#define b2 = 0.124364681271139f;
#define b3 = -0.033323415913556f;
#define b4 = 0.008928982383084f;
#define b5 = -0.002392513618779f;
#define b6 = 0.000641072092032f;
#define b7 = -0.000171774749350f;
*/
__constant__ float BSplinePreFilter[8] = {
1.732176555412859f, //b0
-0.464135309171000f, //b1
0.124364681271139f,
-0.033323415913556f,
0.008928982383084f,
-0.002392513618779f,
0.000641072092032f,
-0.000171774749350f, //b7
};
// ***************************************************************************
// * Global GPU procedures
// ***************************************************************************
template<class floatN>
__global__ void SamplesToCoefficients2DX(
floatN* image, // in-place processing
uint pitch, // width in bytes
uint width, // width of the image
uint height) // height of the image
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
floatN* line = (floatN*)((uchar*)image + y * pitch); //direct access
ConvertToInterpolationCoefficients(line, width, sizeof(floatN));
}
template<class floatN>
__global__ void SamplesToCoefficients2DY(
floatN* image, // in-place processing
uint pitch, // width in bytes
uint width, // width of the image
uint height) // height of the image
{
// process lines in y-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
floatN* line = image + x; //direct access
ConvertToInterpolationCoefficients(line, height, pitch);
}
//Kernel
template<class floatN>
__global__ void FIR_2D_X(
floatN* image, // in-place processing
floatN* coeffi, // in-place processing
uint width, // width of the image
uint height) {
const int start_x = blockIdx.x * (blockDim.x - 14);
const int tid_y = blockIdx.y * blockDim.y;
__shared__ float IMG[256];
//
int cache_x = start_x + threadIdx.x - 7;
cache_x = cache_x < 0 ? 0 : cache_x;
cache_x = cache_x > width - 1 ? width - 1 : cache_x;
IMG[threadIdx.x] = image[tid_y*width + cache_x];
__syncthreads();
int tid_x = blockIdx.x * (blockDim.x - 14) + threadIdx.x;
if (threadIdx.x < blockDim.x - 14 && tid_x < width) {
coeffi[tid_y*width + tid_x] =
BSplinePreFilter[7] * (IMG[threadIdx.x + 0] + IMG[threadIdx.x + 14]) +
BSplinePreFilter[6] * (IMG[threadIdx.x + 1] + IMG[threadIdx.x + 13]) +
BSplinePreFilter[5] * (IMG[threadIdx.x + 2] + IMG[threadIdx.x + 12]) +
BSplinePreFilter[4] * (IMG[threadIdx.x + 3] + IMG[threadIdx.x + 11]) +
BSplinePreFilter[3] * (IMG[threadIdx.x + 4] + IMG[threadIdx.x + 10]) +
BSplinePreFilter[2] * (IMG[threadIdx.x + 5] + IMG[threadIdx.x + 9]) +
BSplinePreFilter[1] * (IMG[threadIdx.x + 6] + IMG[threadIdx.x + 8]) +
BSplinePreFilter[0] * (IMG[threadIdx.x + 7])
;
}
}
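// Tiling note: each 256-thread block stages 256 row samples in shared memory,
// including a 7-sample apron on both sides, so only blockDim.x - 14 = 242
// threads write an output; the host wrapper below therefore launches
// (width + 241) / 242 blocks per row.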
template<class floatN>
__global__ void FIR_2D_Y(
floatN* image, // in-place processing
floatN* coeffi, // in-place processing
uint width, // width of the image
uint height) {
const int start_y = blockIdx.x * (blockDim.x - 14);
const int tid_x = blockIdx.y * blockDim.y;
__shared__ float IMG[256];
//
int cache_y = start_y + threadIdx.x - 7;
cache_y = cache_y < 0 ? 0 : cache_y;
cache_y = cache_y > height - 1 ? height - 1 : cache_y;
IMG[threadIdx.x] = image[cache_y*width + tid_x];
__syncthreads();
int tid_y = blockIdx.x * (blockDim.x - 14) + threadIdx.x;
if (threadIdx.x < blockDim.x - 14 && tid_y < height) {
coeffi[tid_y*width + tid_x] =
BSplinePreFilter[7] * (IMG[threadIdx.x + 0] + IMG[threadIdx.x + 14]) +
BSplinePreFilter[6] * (IMG[threadIdx.x + 1] + IMG[threadIdx.x + 13]) +
BSplinePreFilter[5] * (IMG[threadIdx.x + 2] + IMG[threadIdx.x + 12]) +
BSplinePreFilter[4] * (IMG[threadIdx.x + 3] + IMG[threadIdx.x + 11]) +
BSplinePreFilter[3] * (IMG[threadIdx.x + 4] + IMG[threadIdx.x + 10]) +
BSplinePreFilter[2] * (IMG[threadIdx.x + 5] + IMG[threadIdx.x + 9]) +
BSplinePreFilter[1] * (IMG[threadIdx.x + 6] + IMG[threadIdx.x + 8]) +
BSplinePreFilter[0] * (IMG[threadIdx.x + 7])
;
}
}
// ***************************************************************************
// * Exported functions
// ***************************************************************************
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param pitch width in bytes (including padding bytes)
//! @param width image width in number of pixels
//! @param height image height in number of pixels
template<class floatN>
extern void CubicBSplinePrefilter2D(floatN* image, uint pitch, uint width, uint height)
{
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
SamplesToCoefficients2DX<floatN> << <dimGridX, dimBlockX >> >(image, pitch, width, height);
//CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
SamplesToCoefficients2DY<floatN> << <dimGridY, dimBlockY >> >(image, pitch, width, height);
//CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
}
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param pitch width in bytes (including padding bytes)
//! @param width image width in number of pixels
//! @param height image height in number of pixels
//! @note Prints stopwatch feedback
template<class floatN>
extern void CubicBSplinePrefilter2DTimer(floatN* image, uint pitch, uint width, uint height)
{
printf("\nCubic B-Spline Prefilter timer:\n");
unsigned int hTimer;
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
SamplesToCoefficients2DX<floatN> << <dimGridX, dimBlockX >> >(image, pitch, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueX = cutGetTimerValue(hTimer);
printf("x-direction : %f msec\n", timerValueX);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
SamplesToCoefficients2DY<floatN> << <dimGridY, dimBlockY >> >(image, pitch, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueY = cutGetTimerValue(hTimer);
printf("y-direction : %f msec\n", timerValueY);
printf("total : %f msec\n\n", timerValueX + timerValueY);
}
//FIR
template<class floatN>
extern void CubicBSplinePrefilter2D_FIR(floatN* image , uint pitch, uint width, uint height)
{
floatN* coeffi_tmp;
cudaMalloc((void**)&coeffi_tmp, sizeof(floatN)*width*height);
//interpolation
uint Grid_Width = (width + 241) / 242;
uint Grid_Height = height;
dim3 DimBlock(256, 1);
dim3 DimGrid(Grid_Width, Grid_Height);
FIR_2D_X<floatN> << <DimGrid, DimBlock >> > (image, coeffi_tmp, width, height);
Grid_Width = (height + 241) / 242;
Grid_Height = width;
DimGrid = dim3(Grid_Width, Grid_Height);
FIR_2D_Y<floatN> << <DimGrid, DimBlock >> > (coeffi_tmp, image, width, height);
cudaFree(coeffi_tmp);
}
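// A minimal host-side sketch (not part of the original library; the helper name
// cpuPrefilterRowFIR, its use of plain float, and the local coefficient table that
// mirrors BSplinePreFilter are assumptions made for illustration). It shows what
// FIR_2D_X computes for a single row: a symmetric 15-tap FIR filter with
// clamp-to-edge boundaries, i.e.
// out[x] = b0*in[x] + sum_{k=1..7} b_k * (in[clamp(x-k)] + in[clamp(x+k)]).
inline void cpuPrefilterRowFIR(const float* in, float* out, uint width)
{
	const float b[8] = {
		1.732176555412859f, -0.464135309171000f, 0.124364681271139f,
		-0.033323415913556f, 0.008928982383084f, -0.002392513618779f,
		0.000641072092032f, -0.000171774749350f };
	for (uint x = 0; x < width; x++)
	{
		float acc = b[0] * in[x];                                // center tap
		for (uint k = 1; k <= 7; k++)
		{
			uint left  = x < k ? 0 : x - k;                      // clamp to edge
			uint right = x + k > width - 1 ? width - 1 : x + k;  // clamp to edge
			acc += b[k] * (in[left] + in[right]);                // symmetric taps
		}
		out[x] = acc;
	}
}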
#endif //_2D_CUBIC_BSPLINE_PREFILTER_H_
|
d8c38f7d783aa7a4cf0b889d2fbdfbe218e18f5e.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
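// A hedged CPU sketch (not part of the assignment scaffold; the helper name
// referenceCdfSketch is made up here) that walks the four steps above on the host.
// Note that the worked example lists the inclusive sums [4 11 14]; step 4 asks for
// an exclusive scan, which for histo = [4 7 3] yields cdf = [0 4 11].
static void referenceCdfSketch(const float* lum, int n,
                               unsigned int* cdf, int numBins)
{
	float lumMin = lum[0], lumMax = lum[0];
	for (int i = 1; i < n; ++i) {                 // 1) find min and max
		if (lum[i] < lumMin) lumMin = lum[i];
		if (lum[i] > lumMax) lumMax = lum[i];
	}
	float lumRange = lumMax - lumMin;             // 2) range
	unsigned int* histo = new unsigned int[numBins]();
	for (int i = 0; i < n; ++i) {                 // 3) histogram
		int bin = (int)((lum[i] - lumMin) / lumRange * numBins);
		if (bin > numBins - 1) bin = numBins - 1; // clamp the maximum value
		++histo[bin];
	}
	unsigned int running = 0;                     // 4) exclusive prefix sum
	for (int b = 0; b < numBins; ++b) {
		cdf[b] = running;
		running += histo[b];
	}
	delete[] histo;
}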
#include "reference_calc.cpp"
#include "utils.h"
#include "hip/hip_runtime.h"
// #define USE_PRINTF_FOR_DEBUG
// #define ENABLE_REF_CHECK
#define BLOCK_SIZE_MAX_X 16 // i.e. maximum number of threads per block (x dimension)
// Note: In this particular application it should be a power
// of 2
#define BLOCK_SIZE_MAX_Y 16 // i.e. maximum number of threads per block (y dimension)
// Note: In this particular application it should be a power
// of 2
#define BLOCK_SIZE_HISTO_MAX_X 22 // i.e. maximum number of threads per block (x dimension)
#define BLOCK_SIZE_HISTO_MAX_Y 22 // i.e. maximum number of threads per block (y dimension)
#define BLOCK_SIZE_SCAN_MAX 512 // i.e. maximum number of threads per block (x dimension)
__global__
void global_find_min_max(float *d_Out,
const float *d_In,
int numRows,
int numCols,
bool isInitialRun)
{
__shared__ float _sharedVals[2 * BLOCK_SIZE_MAX_X * BLOCK_SIZE_MAX_Y];
int threadsPerBlock = blockDim.x * blockDim.y;
int blockId = blockIdx.x + (blockIdx.y * gridDim.x);
int threadId = threadIdx.x + (threadIdx.y * blockDim.x);
int myId = (blockId * threadsPerBlock) + threadId;
// Let's calculate total number of pixels (just once)
const int numPixelTotal =
numRows * numCols;
// Let's determine the number of pixels this block is working on
const int numPixelBlock =
(blockDim.x * blockDim.y);
if ( myId >= numPixelTotal )
{
return;
}
else
{
// Let's determine the index inside of this block
int tid =
threadIdx.y * blockDim.x + threadIdx.x;
// Fetch values into shared memory
if (isInitialRun)
{
_sharedVals[tid] =
d_In[myId];
_sharedVals[tid + numPixelBlock] =
d_In[myId];
}
else
{
_sharedVals[tid] =
d_In[myId];
_sharedVals[tid + numPixelBlock] =
d_In[myId + numPixelTotal];
}
// do reduction in shared mem
for (unsigned int s = numPixelBlock / 2; s > 0; s >>= 1)
{
if (tid < s &&
(myId + s) < numPixelTotal)
{
// Min
_sharedVals[tid] =
min(_sharedVals[tid], _sharedVals[tid + s]);
// Max
_sharedVals[tid + numPixelBlock] =
max(_sharedVals[tid + numPixelBlock], _sharedVals[tid + numPixelBlock + s]);
}
__syncthreads(); // make sure all min/max at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
if (gridDim.x > 1 || gridDim.y > 1)
{
// Min
d_Out[myId / numPixelBlock] = d_In[myId];
d_Out[myId / numPixelBlock] = _sharedVals[tid];
// Max
d_Out[(myId / numPixelBlock) + numPixelBlock] = d_In[myId + numPixelTotal];
d_Out[(myId / numPixelBlock) + numPixelBlock] = _sharedVals[tid + numPixelBlock];
}
else
{
// Min
d_Out[myId / numPixelBlock] = d_In[myId];
d_Out[myId / numPixelBlock] = _sharedVals[tid];
// Max
d_Out[(myId / numPixelBlock) + 1] = d_In[myId + numPixelTotal];
d_Out[(myId / numPixelBlock) + 1] = _sharedVals[tid + numPixelBlock];
}
}
}
}
__global__
void simple_histo(unsigned int *d_bins,
const float *d_In,
const unsigned int BIN_COUNT,
float _min,
float _range,
int numRows, int numCols)
{
int threadsPerBlock = blockDim.x * blockDim.y;
int blockId = blockIdx.x + (blockIdx.y * gridDim.x);
int threadId = threadIdx.x + (threadIdx.y * blockDim.x);
int myId = (blockId * threadsPerBlock) + threadId;
// Let's calculate total number of pixels (just once)
const int numPixelTotal =
numRows * numCols;
if ( myId >= numPixelTotal )
{
return;
}
else
{
float myItem = d_In[myId];
// int myBin = ((myItem - d_Min[0]) / (d_Max[0] - d_Min[0])) * BIN_COUNT;
unsigned int myBin =
min(
static_cast<unsigned int>(BIN_COUNT - 1),
static_cast<unsigned int>((myItem - _min) / _range * BIN_COUNT));
atomicAdd(&(d_bins[myBin]), 1);
}
}
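// Note on the clamp above: for the brightest pixel (myItem == _max) the ratio
// (myItem - _min) / _range is exactly 1.0f, which would index bin BIN_COUNT;
// the min() keeps it in the last valid bin, BIN_COUNT - 1.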
// Exclusive Scan (Blelloch)
__global__
void scanKernelExclusive(const unsigned int *d_In,
unsigned int *d_Out,
size_t size,
size_t offset,
bool isLastCall)
{
// Stores boundary values to account for sizes that are not powers of 2
__shared__ unsigned int _boundaryValueCurrent;
__shared__ unsigned int _finalAdd;
unsigned int _finalRemember;
__shared__ unsigned int _sharedVals[BLOCK_SIZE_SCAN_MAX];
int myId =
threadIdx.x;
if (myId == 0)
{
_boundaryValueCurrent = 0;
_finalRemember =
d_In[offset + size - 1];
if (offset > 0)
{
_finalAdd =
d_Out[0] + d_Out[offset - 1];
}
}
__syncthreads();
if (myId < size)
{
// Initial data fetch
_sharedVals[myId] =
d_In[myId + offset];
__syncthreads();
// Used to track how many steps are left by right-shifting its value
// (i.e. implicitly calculating log2 of the size)
size_t _stepsLeft =
size;
// Which neighbor to the left has to be added?
unsigned int _neighbor =
1;
// Is it my turn to add?
unsigned int _selfMask =
1;
// Step 1: Adding neighbors
while (_stepsLeft)
{
if ((_selfMask & myId) == _selfMask)
{
_sharedVals[myId] +=
_sharedVals[myId - _neighbor];
}
_stepsLeft >>= 1;
_neighbor <<= 1;
_selfMask <<= 1;
_selfMask++;
__syncthreads();
}
// Step 2: Down-sweep and adding neighbors again
// Adjustment to properly start
_selfMask--;
_selfMask >>= 1;
_neighbor >>= 1;
_stepsLeft = size;
while (_stepsLeft)
{
bool _fillInBoundaryValue =
true;
if ((_selfMask & myId) == _selfMask)
{
unsigned int _tmp =
_sharedVals[myId];
_sharedVals[myId] +=
_sharedVals[myId - _neighbor];
_sharedVals[myId - _neighbor] =
_tmp;
_fillInBoundaryValue =
false;
}
__syncthreads();
// Cross-sweep of boundary value
unsigned int _selfMaskCrossSweep =
_selfMask >> 1;
if (_fillInBoundaryValue)
{
if (((_selfMask & myId) ^ _selfMaskCrossSweep) == 0)
{
if ((myId + _neighbor) >= size)
{
unsigned int _boundaryValueTmp =
_boundaryValueCurrent + _sharedVals[(myId)];
_sharedVals[myId] =
_boundaryValueCurrent;
_boundaryValueCurrent =
_boundaryValueTmp;
}
}
}
_selfMask--;
_selfMask >>= 1;
_neighbor >>= 1;
_stepsLeft >>= 1;
__syncthreads();
}
if (offset > 0)
{
_sharedVals[myId] +=
_finalAdd;
}
__syncthreads();
d_Out[myId + offset] =
_sharedVals[myId];
if (myId == 0)
{
if (isLastCall)
{
d_Out[0] =
0;
}
else
{
d_Out[0] =
_finalRemember;
}
}
}
}
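// Worked example (matching the header comment): an exclusive scan of the
// 3-bin histogram [4 7 3] gives d_cdf = [0 4 11], i.e. cdf[b] counts the values
// falling below bin b. When numBins exceeds BLOCK_SIZE_SCAN_MAX the kernel is
// launched once per 512-bin chunk: d_Out[0] temporarily holds the previous
// chunk's last input value so the next launch can add the running total
// (_finalAdd = d_Out[0] + d_Out[offset - 1]), and only the final launch
// (isLastCall) resets d_Out[0] to 0, as an exclusive scan requires.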
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
#if defined(USE_PRINTF_FOR_DEBUG)
printf("Image is %i columns x %i rows\n",
numCols,
numRows);
printf("Number of bins is %i\n",
numBins);
#endif
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
*/
int gridSizeX = (numCols - 1) / BLOCK_SIZE_MAX_X + 1;
int gridSizeY = (numRows - 1) / BLOCK_SIZE_MAX_Y + 1;
// Block size (i.e., number of threads per block)
dim3 blockSize(BLOCK_SIZE_MAX_X, BLOCK_SIZE_MAX_Y, 1);
// Compute grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
dim3 gridSize(gridSizeX, gridSizeY, 1);
float *d_IntermediateOut;
// Allocate memory on the device for storing the intermediate output values
checkCudaErrors(
hipMalloc(
&d_IntermediateOut,
max((unsigned int)(2 * sizeof(float) * gridSizeX * gridSizeY), (unsigned int)(sizeof(unsigned int) * numBins))));
checkCudaErrors(
hipMemset(
d_IntermediateOut,
0x0,
2 * sizeof(float) * gridSizeX * gridSizeY));
#if defined(USE_PRINTF_FOR_DEBUG)
float *h_Intermediate =
new float[2 * sizeof(float) * numRows * numCols];
memset(
h_Intermediate,
0x0,
2 * sizeof(float) * numRows * numCols);
checkCudaErrors(
hipMemcpy(
h_Intermediate,
d_logLuminance,
sizeof(float) * numRows * numCols,
hipMemcpyDeviceToHost));
checkCudaErrors(
hipMemcpy(
h_Intermediate,
d_IntermediateIn,
2 * sizeof(float) * numRows * numCols,
hipMemcpyDeviceToHost));
float h_Out = 0;
printf("Blocksize\tX: %i\tY: %i\tZ: %i\n",
blockSize.x,
blockSize.y,
blockSize.z);
printf("Gridsize\tX: %i\tY: %i\tZ: %i\n",
gridSize.x,
gridSize.y,
gridSize.z);
#endif
hipLaunchKernelGGL(( global_find_min_max), dim3(gridSize), dim3(blockSize), 0, 0,
d_IntermediateOut,
d_logLuminance,
numRows,
numCols,
true);
#if defined(USE_PRINTF_FOR_DEBUG)
checkCudaErrors(
hipMemcpy(
h_Intermediate,
d_IntermediateOut,
2 * sizeof(float) * gridSizeX * gridSizeY,
hipMemcpyDeviceToHost));
#endif
hipLaunchKernelGGL(( global_find_min_max), dim3(1), dim3(blockSize), 0, 0,
d_IntermediateOut,
d_IntermediateOut,
gridSizeX,
gridSizeY,
false);
#if defined(USE_PRINTF_FOR_DEBUG)
checkCudaErrors(
hipMemcpy(
&h_Out,
&d_IntermediateOut[0],
sizeof(float),
hipMemcpyDeviceToHost));
printf("Min: %f\n", h_Out);
checkCudaErrors(
hipMemcpy(
&h_Out,
&d_IntermediateOut[1],
sizeof(float),
hipMemcpyDeviceToHost));
printf("Max: %f\n", h_Out);
#endif
/*
2) subtract them to find the range
*/
float h_MinMaxOut[2];
checkCudaErrors(
hipMemcpy(
&h_MinMaxOut[0],
d_IntermediateOut,
2 * sizeof(float),
hipMemcpyDeviceToHost));
min_logLum =
h_MinMaxOut[0];
max_logLum =
h_MinMaxOut[1];
float _logLumRange = max_logLum - min_logLum;
/*
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
*/
unsigned int *d_Bins =
reinterpret_cast<unsigned int *>(d_IntermediateOut);
// Allocate memory on the device for storing the intermediate output values
/*
checkCudaErrors(
hipMalloc(
&d_Bins,
sizeof(unsigned int) * numBins));
*/
checkCudaErrors(
hipMemset(
d_Bins,
0x0,
sizeof(unsigned int) * numBins));
#if defined(USE_PRINTF_FOR_DEBUG)
unsigned int *h_Bins =
new unsigned int[numBins];
memset(h_Bins, 0x0, sizeof(unsigned int) * numBins);
#endif
gridSizeX = (numCols - 1) / BLOCK_SIZE_HISTO_MAX_X + 1;
gridSizeY = (numRows - 1) / BLOCK_SIZE_HISTO_MAX_Y + 1;
// Block size (i.e., number of threads per block)
blockSize.x = BLOCK_SIZE_HISTO_MAX_X;
blockSize.y = BLOCK_SIZE_HISTO_MAX_Y;
// Compute grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
gridSize.x = gridSizeX;
gridSize.y = gridSizeY;
hipLaunchKernelGGL(( simple_histo), dim3(gridSize), dim3(blockSize), 0, 0,
d_Bins,
d_logLuminance,
numBins,
h_MinMaxOut[0],
_logLumRange,
numRows,
numCols);
#if defined(USE_PRINTF_FOR_DEBUG)
checkCudaErrors(
hipMemcpy(
h_Bins,
d_Bins,
sizeof(unsigned int) * numBins,
hipMemcpyDeviceToHost));
#endif
/*
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
gridSizeX = 1;
gridSizeY = 1;
// Compute grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
gridSize.x = gridSizeX;
gridSize.y = gridSizeY;
int _binsLeft =
numBins;
while (_binsLeft)
{
// Block size (i.e., number of threads per block)
blockSize.x =
_binsLeft > BLOCK_SIZE_SCAN_MAX ?
BLOCK_SIZE_SCAN_MAX :
_binsLeft;
blockSize.y = 1;
hipLaunchKernelGGL(( scanKernelExclusive), dim3(gridSize), dim3(blockSize), 0, 0,
d_Bins,
d_cdf,
blockSize.x,
numBins - _binsLeft,
(_binsLeft - blockSize.x) <= 0);
_binsLeft -=
blockSize.x;
}
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code generates a reference cdf on the host by running the *
* reference calculation we have given you. It then copies your GPU *
* generated cdf back to the host and calls a function that compares the *
* the two and will output the first location they differ. *
* ************************************************************************* */
#if defined(ENABLE_REF_CHECK)
float *h_logLuminance = new float[numRows * numCols];
unsigned int *h_cdf = new unsigned int[numBins];
unsigned int *h_your_cdf = new unsigned int[numBins];
checkCudaErrors(hipMemcpy(h_logLuminance, d_logLuminance, numCols * numRows * sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), hipMemcpyDeviceToHost));
referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins);
//compare the results of the CDF
// checkResultsExact(h_cdf, h_your_cdf, numBins);
checkResultsEps(h_cdf, h_your_cdf, numBins, 3, 10);
delete[] h_logLuminance;
delete[] h_cdf;
delete[] h_your_cdf;
#endif
checkCudaErrors(hipFree(d_IntermediateOut));
#if defined(USE_PRINTF_FOR_DEBUG)
delete []h_Intermediate;
delete []h_Bins;
#endif
}
|
d8c38f7d783aa7a4cf0b889d2fbdfbe218e18f5e.cu
|
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "reference_calc.cpp"
#include "utils.h"
#include "cuda_runtime.h"
// #define USE_PRINTF_FOR_DEBUG
// #define ENABLE_REF_CHECK
#define BLOCK_SIZE_MAX_X 16 // i.e. maximum number of threads per block (x dimension)
// Note: In this particular application it should be a power
// of 2
#define BLOCK_SIZE_MAX_Y 16 // i.e. maximum number of threads per block (y dimension)
// Note: In this particular application it should be a power
// of 2
#define BLOCK_SIZE_HISTO_MAX_X 22 // i.e. maximum number of threads per block (x dimension)
#define BLOCK_SIZE_HISTO_MAX_Y 22 // i.e. maximum number of threads per block (y dimension)
#define BLOCK_SIZE_SCAN_MAX 512 // i.e. maximum number of threads per block (x dimension)
__global__
void global_find_min_max(float *d_Out,
const float *d_In,
int numRows,
int numCols,
bool isInitialRun)
{
__shared__ float _sharedVals[2 * BLOCK_SIZE_MAX_X * BLOCK_SIZE_MAX_Y];
int threadsPerBlock = blockDim.x * blockDim.y;
int blockId = blockIdx.x + (blockIdx.y * gridDim.x);
int threadId = threadIdx.x + (threadIdx.y * blockDim.x);
int myId = (blockId * threadsPerBlock) + threadId;
// Let's calculate total number of pixels (just once)
const int numPixelTotal =
numRows * numCols;
// Let's determine the number of pixels this block is working on
const int numPixelBlock =
(blockDim.x * blockDim.y);
if ( myId >= numPixelTotal )
{
return;
}
else
{
// Let's determine the index inside of this block
int tid =
threadIdx.y * blockDim.x + threadIdx.x;
// Fetch values into shared memory
if (isInitialRun)
{
_sharedVals[tid] =
d_In[myId];
_sharedVals[tid + numPixelBlock] =
d_In[myId];
}
else
{
_sharedVals[tid] =
d_In[myId];
_sharedVals[tid + numPixelBlock] =
d_In[myId + numPixelTotal];
}
// do reduction in shared mem
for (unsigned int s = numPixelBlock / 2; s > 0; s >>= 1)
{
if (tid < s &&
(myId + s) < numPixelTotal)
{
// Min
_sharedVals[tid] =
min(_sharedVals[tid], _sharedVals[tid + s]);
// Max
_sharedVals[tid + numPixelBlock] =
max(_sharedVals[tid + numPixelBlock], _sharedVals[tid + numPixelBlock + s]);
}
__syncthreads(); // make sure all min/max at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
if (gridDim.x > 1 || gridDim.y > 1)
{
// Min
d_Out[myId / numPixelBlock] = d_In[myId];
d_Out[myId / numPixelBlock] = _sharedVals[tid];
// Max
d_Out[(myId / numPixelBlock) + numPixelBlock] = d_In[myId + numPixelTotal];
d_Out[(myId / numPixelBlock) + numPixelBlock] = _sharedVals[tid + numPixelBlock];
}
else
{
// Min
d_Out[myId / numPixelBlock] = d_In[myId];
d_Out[myId / numPixelBlock] = _sharedVals[tid];
// Max
d_Out[(myId / numPixelBlock) + 1] = d_In[myId + numPixelTotal];
d_Out[(myId / numPixelBlock) + 1] = _sharedVals[tid + numPixelBlock];
}
}
}
}
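// Layout note: _sharedVals packs two reductions into one buffer. Entries
// [0, numPixelBlock) hold the running minima and entries
// [numPixelBlock, 2 * numPixelBlock) hold the running maxima, which is why the
// array is declared with 2 * BLOCK_SIZE_MAX_X * BLOCK_SIZE_MAX_Y elements.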
__global__
void simple_histo(unsigned int *d_bins,
const float *d_In,
const unsigned int BIN_COUNT,
float _min,
float _range,
int numRows, int numCols)
{
int threadsPerBlock = blockDim.x * blockDim.y;
int blockId = blockIdx.x + (blockIdx.y * gridDim.x);
int threadId = threadIdx.x + (threadIdx.y * blockDim.x);
int myId = (blockId * threadsPerBlock) + threadId;
// Let's calculate total number of pixels (just once)
const int numPixelTotal =
numRows * numCols;
if ( myId >= numPixelTotal )
{
return;
}
else
{
float myItem = d_In[myId];
// int myBin = ((myItem - d_Min[0]) / (d_Max[0] - d_Min[0])) * BIN_COUNT;
unsigned int myBin =
min(
static_cast<unsigned int>(BIN_COUNT - 1),
static_cast<unsigned int>((myItem - _min) / _range * BIN_COUNT));
atomicAdd(&(d_bins[myBin]), 1);
}
}
// Exclusive Scan (Blelloch)
__global__
void scanKernelExclusive(const unsigned int *d_In,
unsigned int *d_Out,
size_t size,
size_t offset,
bool isLastCall)
{
// Stores boundary values to account for sizes that are not powers of 2
__shared__ unsigned int _boundaryValueCurrent;
__shared__ unsigned int _finalAdd;
unsigned int _finalRemember;
__shared__ unsigned int _sharedVals[BLOCK_SIZE_SCAN_MAX];
int myId =
threadIdx.x;
if (myId == 0)
{
_boundaryValueCurrent = 0;
_finalRemember =
d_In[offset + size - 1];
if (offset > 0)
{
_finalAdd =
d_Out[0] + d_Out[offset - 1];
}
}
__syncthreads();
if (myId < size)
{
// Initial data fetch
_sharedVals[myId] =
d_In[myId + offset];
__syncthreads();
// Used to track how many steps are left by right-shifting its value
// (i.e. implicitly calculating log2 of the size)
size_t _stepsLeft =
size;
// Which neighbor to the left has to be added?
unsigned int _neighbor =
1;
// Is it my turn to add?
unsigned int _selfMask =
1;
// Step 1: Adding neighbors
while (_stepsLeft)
{
if ((_selfMask & myId) == _selfMask)
{
_sharedVals[myId] +=
_sharedVals[myId - _neighbor];
}
_stepsLeft >>= 1;
_neighbor <<= 1;
_selfMask <<= 1;
_selfMask++;
__syncthreads();
}
// Step 2: Down-sweep and adding neighbors again
// Adjustment to properly start
_selfMask--;
_selfMask >>= 1;
_neighbor >>= 1;
_stepsLeft = size;
while (_stepsLeft)
{
bool _fillInBoundaryValue =
true;
if ((_selfMask & myId) == _selfMask)
{
unsigned int _tmp =
_sharedVals[myId];
_sharedVals[myId] +=
_sharedVals[myId - _neighbor];
_sharedVals[myId - _neighbor] =
_tmp;
_fillInBoundaryValue =
false;
}
__syncthreads();
// Cross-sweep of boundary value
unsigned int _selfMaskCrossSweep =
_selfMask >> 1;
if (_fillInBoundaryValue)
{
if (((_selfMask & myId) ^ _selfMaskCrossSweep) == 0)
{
if ((myId + _neighbor) >= size)
{
unsigned int _boundaryValueTmp =
_boundaryValueCurrent + _sharedVals[(myId)];
_sharedVals[myId] =
_boundaryValueCurrent;
_boundaryValueCurrent =
_boundaryValueTmp;
}
}
}
_selfMask--;
_selfMask >>= 1;
_neighbor >>= 1;
_stepsLeft >>= 1;
__syncthreads();
}
if (offset > 0)
{
_sharedVals[myId] +=
_finalAdd;
}
__syncthreads();
d_Out[myId + offset] =
_sharedVals[myId];
if (myId == 0)
{
if (isLastCall)
{
d_Out[0] =
0;
}
else
{
d_Out[0] =
_finalRemember;
}
}
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
#if defined(USE_PRINTF_FOR_DEBUG)
printf("Image is %i columns x %i rows\n",
numCols,
numRows);
printf("Number of bins is %i\n",
numBins);
#endif
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
*/
int gridSizeX = (numCols - 1) / BLOCK_SIZE_MAX_X + 1;
int gridSizeY = (numRows - 1) / BLOCK_SIZE_MAX_Y + 1;
// Block size (i.e., number of threads per block)
dim3 blockSize(BLOCK_SIZE_MAX_X, BLOCK_SIZE_MAX_Y, 1);
// Compute grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
dim3 gridSize(gridSizeX, gridSizeY, 1);
float *d_IntermediateOut;
// Allocate memory on the device for storing the intermediate output values
checkCudaErrors(
cudaMalloc(
&d_IntermediateOut,
max((unsigned int)(2 * sizeof(float) * gridSizeX * gridSizeY), (unsigned int)(sizeof(unsigned int) * numBins))));
checkCudaErrors(
cudaMemset(
d_IntermediateOut,
0x0,
2 * sizeof(float) * gridSizeX * gridSizeY));
#if defined(USE_PRINTF_FOR_DEBUG)
float *h_Intermediate =
new float[2 * sizeof(float) * numRows * numCols];
memset(
h_Intermediate,
0x0,
2 * sizeof(float) * numRows * numCols);
checkCudaErrors(
cudaMemcpy(
h_Intermediate,
d_logLuminance,
sizeof(float) * numRows * numCols,
cudaMemcpyDeviceToHost));
checkCudaErrors(
cudaMemcpy(
h_Intermediate,
d_IntermediateIn,
2 * sizeof(float) * numRows * numCols,
cudaMemcpyDeviceToHost));
float h_Out = 0;
printf("Blocksize\tX: %i\tY: %i\tZ: %i\n",
blockSize.x,
blockSize.y,
blockSize.z);
printf("Gridsize\tX: %i\tY: %i\tZ: %i\n",
gridSize.x,
gridSize.y,
gridSize.z);
#endif
global_find_min_max<<<gridSize, blockSize>>>
(d_IntermediateOut,
d_logLuminance,
numRows,
numCols,
true);
#if defined(USE_PRINTF_FOR_DEBUG)
checkCudaErrors(
cudaMemcpy(
h_Intermediate,
d_IntermediateOut,
2 * sizeof(float) * gridSizeX * gridSizeY,
cudaMemcpyDeviceToHost));
#endif
global_find_min_max<<<1, blockSize>>>
(d_IntermediateOut,
d_IntermediateOut,
gridSizeX,
gridSizeY,
false);
#if defined(USE_PRINTF_FOR_DEBUG)
checkCudaErrors(
cudaMemcpy(
&h_Out,
&d_IntermediateOut[0],
sizeof(float),
cudaMemcpyDeviceToHost));
printf("Min: %f\n", h_Out);
checkCudaErrors(
cudaMemcpy(
&h_Out,
&d_IntermediateOut[1],
sizeof(float),
cudaMemcpyDeviceToHost));
printf("Max: %f\n", h_Out);
#endif
/*
2) subtract them to find the range
*/
float h_MinMaxOut[2];
checkCudaErrors(
cudaMemcpy(
&h_MinMaxOut[0],
d_IntermediateOut,
2 * sizeof(float),
cudaMemcpyDeviceToHost));
min_logLum =
h_MinMaxOut[0];
max_logLum =
h_MinMaxOut[1];
float _logLumRange = max_logLum - min_logLum;
/*
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
*/
unsigned int *d_Bins =
reinterpret_cast<unsigned int *>(d_IntermediateOut);
// Allocate memory on the device for storing the intermediate output values
/*
checkCudaErrors(
cudaMalloc(
&d_Bins,
sizeof(unsigned int) * numBins));
*/
checkCudaErrors(
cudaMemset(
d_Bins,
0x0,
sizeof(unsigned int) * numBins));
#if defined(USE_PRINTF_FOR_DEBUG)
unsigned int *h_Bins =
new unsigned int[numBins];
memset(h_Bins, 0x0, sizeof(unsigned int) * numBins);
#endif
gridSizeX = (numCols - 1) / BLOCK_SIZE_HISTO_MAX_X + 1;
gridSizeY = (numRows - 1) / BLOCK_SIZE_HISTO_MAX_Y + 1;
// Block size (i.e., number of threads per block)
blockSize.x = BLOCK_SIZE_HISTO_MAX_X;
blockSize.y = BLOCK_SIZE_HISTO_MAX_Y;
// Compute grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
gridSize.x = gridSizeX;
gridSize.y = gridSizeY;
simple_histo<<<gridSize, blockSize>>>(
d_Bins,
d_logLuminance,
numBins,
h_MinMaxOut[0],
_logLumRange,
numRows,
numCols);
#if defined(USE_PRINTF_FOR_DEBUG)
checkCudaErrors(
cudaMemcpy(
h_Bins,
d_Bins,
sizeof(unsigned int) * numBins,
cudaMemcpyDeviceToHost));
#endif
/*
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
gridSizeX = 1;
gridSizeY = 1;
// Compute grid size (i.e., number of blocks per kernel launch)
// from the image size and block size.
gridSize.x = gridSizeX;
gridSize.y = gridSizeY;
int _binsLeft =
numBins;
while (_binsLeft)
{
// Block size (i.e., number of threads per block)
blockSize.x =
_binsLeft > BLOCK_SIZE_SCAN_MAX ?
BLOCK_SIZE_SCAN_MAX :
_binsLeft;
blockSize.y = 1;
scanKernelExclusive<<<gridSize, blockSize>>>(
d_Bins,
d_cdf,
blockSize.x,
numBins - _binsLeft,
(_binsLeft - blockSize.x) <= 0);
_binsLeft -=
blockSize.x;
}
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code generates a reference cdf on the host by running the *
* reference calculation we have given you. It then copies your GPU *
* generated cdf back to the host and calls a function that compares the *
* the two and will output the first location they differ. *
* ************************************************************************* */
#if defined(ENABLE_REF_CHECK)
float *h_logLuminance = new float[numRows * numCols];
unsigned int *h_cdf = new unsigned int[numBins];
unsigned int *h_your_cdf = new unsigned int[numBins];
checkCudaErrors(cudaMemcpy(h_logLuminance, d_logLuminance, numCols * numRows * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost));
referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins);
//compare the results of the CDF
// checkResultsExact(h_cdf, h_your_cdf, numBins);
checkResultsEps(h_cdf, h_your_cdf, numBins, 3, 10);
delete[] h_logLuminance;
delete[] h_cdf;
delete[] h_your_cdf;
#endif
checkCudaErrors(cudaFree(d_IntermediateOut));
#if defined(USE_PRINTF_FOR_DEBUG)
delete []h_Intermediate;
delete []h_Bins;
#endif
}
|
fc68e292150ed7de33c091a7dae590075e1a97b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include "rocblas.h"
#include "src/common/cuTimer.cu"
#include "src/common/parseinputs.cpp"
#include "src/training/training.cu"
#include "src/testing/testing.cu"
void runmulticlassifier( char* ,int ,int ,char* ,int ,int ,int ,int ,float ,float ,int ,float);
//MultiClass classification using SVM
int main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runmulticlassifier( "/home/sherrero/NVIDIA_GPU_Computing_SDK/C/src/multisvm_1.0/data/adult/a9a",
32561,
123,
"/home/sherrero/NVIDIA_GPU_Computing_SDK/C/src/multisvm_1.0/data/adult/a9a.t",
16281,
1,
2,
1,
100,
0.001,
0,
0.5);
CUT_EXIT(argc, argv);
}
/**
* Runs both training and testing. Provides timings
* @param trainfilename name of the file containing the training samples
* @param ntraining number of training samples
* @param nfeatures number of features in each training sample
* @param testfilename name of the file containing the testing samples
* @param ntesting number of testing samples
* @param code {0: One vs All, 1: All vs All, 2: Even vs Odd}
* @param nclasses number of classes in the SVM problem
* @param ntasks number of binary classification tasks
* @param C penalization parameter
* @param tau stopping parameter of the SMO algorithm
* @param kernelcode type of kernel to use
* @param beta if using RBF kernel, the value of beta
*/
void runmulticlassifier(char* trainfilename,
int ntraining,
int nfeatures,
char* testfilename,
int ntesting,
int code,
int nclasses,
int ntasks,
float C,
float tau,
int kernelcode,
float beta)
{
cublasStatus status;
status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
}
float* h_C = (float*) malloc(sizeof(float) * ntasks);
for(int i=0; i<ntasks; i++)
{
h_C[i]=C;
}
printf("Input Train File Name: %s\n", trainfilename);
printf("Input Test File Name: %s\n", testfilename);
if( code==0)
{
printf("Code: One Vs All\n");
}
else if( code==1)
{
printf("Code: All Vs All\n");
}
else if( code==2)
{
printf("Code: Even Vs Odd\n");
}
printf("# of training samples: %i\n", ntraining);
printf("# of testing samples: %i\n", ntesting);
printf("# of features: %i\n", nfeatures);
printf("# of tasks: %i\n", ntasks);
printf("# of classes: %i\n", nclasses);
printf("Beta: %f\n", beta);
//Allocate memory for xtraindata
float* h_xtraindata = (float*) malloc(sizeof(float) * ntraining* nfeatures);
float* h_xtraindatatemp = (float*) malloc(sizeof(float) * ntraining* nfeatures);
//Allocate memory for xtestdata
float* h_xtestdata = (float*) malloc(sizeof(float) * ntesting* nfeatures);
float* h_xtestdatatemp = (float*) malloc(sizeof(float) * ntesting* nfeatures);
//Allocate memory for ltraindata
int* h_ltraindata = (int*) malloc(sizeof(int) * ntraining);
//Allocate memory for ltestdata
int* h_ltestdata = (int*) malloc(sizeof(int) * ntesting);
//Parse data from input file
printf("Parsing input data...\n");
parsedatalibsvm(trainfilename, h_xtraindatatemp, h_ltraindata, ntraining, nfeatures, nclasses);
parsedatalibsvm(testfilename, h_xtestdatatemp, h_ltestdata, ntesting, nfeatures, nclasses);
printf("Parsing input data done!\n");
//Allocate memory for rdata
int* h_rdata= (int*) malloc(sizeof(int) * nclasses * ntasks);
if( code==0)
{
generateovacode(h_rdata, nclasses, ntasks);
}
else if( code==1)
{
generateavacode(h_rdata, nclasses, ntasks);
}
else if(code==2)
{
generateevenoddcode(h_rdata, nclasses, ntasks);
}
printcode(h_rdata, nclasses, ntasks);
bool iszero=false;
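// The parser fills h_xtraindatatemp in row-major (sample-major) order; the loop
// below transposes it into column-major (feature-major) h_xtraindata, so that
// h_xtraindata[j*ntraining + i] holds feature j of sample i, and it also notes
// whether any label is 0 so the labels can be shifted to start at 1 further down.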
for (int i=0; i< ntraining; i++)
{
for (int j=0; j<nfeatures; j++)
{
h_xtraindata[j*ntraining+i]=h_xtraindatatemp[i*nfeatures+j];
}
if(h_ltraindata[i]==0)
{
iszero=true;
}
}
for (int i=0; i< ntesting; i++)
{
for (int j=0; j<nfeatures; j++)
{
h_xtestdata[j*ntesting+i]=h_xtestdatatemp[i*nfeatures+j];
}
}
if (iszero)
{
for (int i=0; i< ntraining; i++)
{
h_ltraindata[i]=h_ltraindata[i]+1;
}
for (int i=0; i< ntesting; i++)
{
h_ltestdata[i]=h_ltestdata[i]+1;
}
}
free(h_xtraindatatemp);
free(h_xtestdatatemp);
int* h_ltesthatdata = (int*) malloc(sizeof(int) * ntesting);
//Allocate memory for b
float * h_b= (float*) malloc(sizeof(float) * ntasks);
for (int i=0; i<ntasks; i++)
{
h_b[i]= 0.0f;
}
//Allocate memory for adata
float* h_atraindata= (float*) malloc(sizeof(float) * ntraining * ntasks);
cuResetTimer();
float tA1=cuGetTimer();
printf("Training classifier...\n");
trainclassifier ( h_xtraindata,
h_ltraindata,
h_rdata,
h_atraindata,
ntraining,
nfeatures,
nclasses,
ntasks,
h_C,
h_b,
tau,
kernelcode,
beta);
float tA2=cuGetTimer();
printf("Training classifier done!\n");
printf("Training time Launch =%.1f usec, finished=%.1f msec\n",tA1*1.e3,tA2);
for (int j=0; j<ntasks; j++)
{
int svnum=0;
for (int i=0; i<ntraining; i++)
{
if(h_atraindata[j*ntraining + i]!=0)
{
svnum++;
}
}
printf("Task %i, svnum, %i, b %f\n",j, svnum,h_b[j] );
}
int nSV=0;
for (int i=0; i< ntraining; i++)
{
for (int j=0; j< ntasks; j++)
{
if(h_atraindata[j*ntraining+i]!=0)
{
nSV++;
break;
}
}
}
float* h_xtraindatared = (float*) malloc(sizeof(float) * nSV* nfeatures);
int* h_ltraindatared = (int*) malloc(sizeof(int) * nSV);
float* h_atraindatared = (float*) malloc(sizeof(float) *ntasks* nSV);
int k=0;
for (int i=0; i< ntraining; i++)
{
//Check if SV
bool isSV=false;
for (int j=0; j< ntasks; j++)
{
if(h_atraindata[j*ntraining+i]!=0)
{
isSV=true;
break;
}
}
//If SV copy sample and alphas
if(isSV)
{
for (int j=0; j< ntasks; j++)
{
h_atraindatared[j*nSV +k]= h_atraindata[j*ntraining+i];
}
for (int j=0; j<nfeatures; j++)
{
h_xtraindatared[j*nSV+k]=h_xtraindata[j*ntraining+i];
}
h_ltraindatared[k]= h_ltraindata[i];
k++;
}
}
printf("Testing classifier...\n");
cuResetTimer();
float tB1=cuGetTimer();
testingclassifier( h_xtraindatared,
h_xtestdata,
h_ltraindatared,
h_ltesthatdata,
h_rdata,
h_atraindatared,
nSV,
ntesting,
nfeatures,
nclasses,
ntasks,
h_b,
beta,
kernelcode);
printf("Testing classifier done\n");
float tB2=cuGetTimer();
printf("Testing time Launch =%.1f usec, finished=%.1f msec\n",tB1*1.e3,tB2);
int errors=0;
for (int i=0; i<ntesting; i++)
{
if( h_ltestdata[i]!=h_ltesthatdata[i])
{
errors++;
}
}
printf("# of testing samples %i, # errors %i, Rate %f\n", ntesting, errors, 100* (float) (ntesting -errors)/(float)ntesting);
free(h_rdata);
free(h_xtraindata);
free(h_xtestdata);
free(h_ltraindata);
free(h_ltestdata);
free(h_b);
free(h_atraindata);
free(h_xtraindatared);
free(h_ltraindatared);
free(h_atraindatared);
}
|
fc68e292150ed7de33c091a7dae590075e1a97b1.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
#include <cutil.h>
#include "cublas.h"
#include "src/common/cuTimer.cu"
#include "src/common/parseinputs.cpp"
#include "src/training/training.cu"
#include "src/testing/testing.cu"
void runmulticlassifier( char* ,int ,int ,char* ,int ,int ,int ,int ,float ,float ,int ,float);
//MultiClass classification using SVM
int main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runmulticlassifier( "/home/sherrero/NVIDIA_GPU_Computing_SDK/C/src/multisvm_1.0/data/adult/a9a",
32561,
123,
"/home/sherrero/NVIDIA_GPU_Computing_SDK/C/src/multisvm_1.0/data/adult/a9a.t",
16281,
1,
2,
1,
100,
0.001,
0,
0.5);
CUT_EXIT(argc, argv);
}
/**
* Runs both training and testing. Provides timings
* @param trainfilename name of the file containing the training samples
* @param ntraining number of training samples
* @param nfeatures number of features in each training sample
* @param testfilename name of the file containing the testing samples
* @param ntesting number of testing samples
* @param code {0: One vs All, 1: All vs All, 2: Even vs Odd}
* @param nclasses number of classes in the SVM problem
* @param ntasks number of binary classification tasks
* @param C penalization parameter
* @param tau stopping parameter of the SMO algorithm
* @param kernelcode type of kernel to use
* @param beta if using RBF kernel, the value of beta
*/
void runmulticlassifier(char* trainfilename,
int ntraining,
int nfeatures,
char* testfilename,
int ntesting,
int code,
int nclasses,
int ntasks,
float C,
float tau,
int kernelcode,
float beta)
{
cublasStatus status;
status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
}
float* h_C = (float*) malloc(sizeof(float) * ntasks);
for(int i=0; i<ntasks; i++)
{
h_C[i]=C;
}
printf("Input Train File Name: %s\n", trainfilename);
printf("Input Test File Name: %s\n", testfilename);
if( code==0)
{
printf("Code: One Vs All\n");
}
else if( code==1)
{
printf("Code: All Vs All\n");
}
else if( code==2)
{
printf("Code: Even Vs Odd\n");
}
printf("# of training samples: %i\n", ntraining);
printf("# of testing samples: %i\n", ntesting);
printf("# of features: %i\n", nfeatures);
printf("# of tasks: %i\n", ntasks);
printf("# of classes: %i\n", nclasses);
printf("Beta: %f\n", beta);
//Allocate memory for xtraindata
float* h_xtraindata = (float*) malloc(sizeof(float) * ntraining* nfeatures);
float* h_xtraindatatemp = (float*) malloc(sizeof(float) * ntraining* nfeatures);
//Allocate memory for xtestdata
float* h_xtestdata = (float*) malloc(sizeof(float) * ntesting* nfeatures);
float* h_xtestdatatemp = (float*) malloc(sizeof(float) * ntesting* nfeatures);
//Allocate memory for ltraindata
int* h_ltraindata = (int*) malloc(sizeof(int) * ntraining);
//Allocate memory for ltestdata
int* h_ltestdata = (int*) malloc(sizeof(int) * ntesting);
//Parse data from input file
printf("Parsing input data...\n");
parsedatalibsvm(trainfilename, h_xtraindatatemp, h_ltraindata, ntraining, nfeatures, nclasses);
parsedatalibsvm(testfilename, h_xtestdatatemp, h_ltestdata, ntesting, nfeatures, nclasses);
printf("Parsing input data done!\n");
//Allocate memory for rdata
int* h_rdata= (int*) malloc(sizeof(int) * nclasses * ntasks);
if( code==0)
{
generateovacode(h_rdata, nclasses, ntasks);
}
else if( code==1)
{
generateavacode(h_rdata, nclasses, ntasks);
}
else if(code==2)
{
generateevenoddcode(h_rdata, nclasses, ntasks);
}
printcode(h_rdata, nclasses, ntasks);
bool iszero=false;
for (int i=0; i< ntraining; i++)
{
for (int j=0; j<nfeatures; j++)
{
h_xtraindata[j*ntraining+i]=h_xtraindatatemp[i*nfeatures+j];
}
if(h_ltraindata[i]==0)
{
iszero=true;
}
}
for (int i=0; i< ntesting; i++)
{
for (int j=0; j<nfeatures; j++)
{
h_xtestdata[j*ntesting+i]=h_xtestdatatemp[i*nfeatures+j];
}
}
if (iszero)
{
for (int i=0; i< ntraining; i++)
{
h_ltraindata[i]=h_ltraindata[i]+1;
}
for (int i=0; i< ntesting; i++)
{
h_ltestdata[i]=h_ltestdata[i]+1;
}
}
free(h_xtraindatatemp);
free(h_xtestdatatemp);
int* h_ltesthatdata = (int*) malloc(sizeof(int) * ntesting);
//Allocate memory for b
float * h_b= (float*) malloc(sizeof(float) * ntasks);
for (int i=0; i<ntasks; i++)
{
h_b[i]= 0.0f;
}
//Allocate memory for adata
float* h_atraindata= (float*) malloc(sizeof(float) * ntraining * ntasks);
cuResetTimer();
float tA1=cuGetTimer();
printf("Training classifier...\n");
trainclassifier ( h_xtraindata,
h_ltraindata,
h_rdata,
h_atraindata,
ntraining,
nfeatures,
nclasses,
ntasks,
h_C,
h_b,
tau,
kernelcode,
beta);
float tA2=cuGetTimer();
printf("Training classifier done!\n");
printf("Training time Launch =%.1f usec, finished=%.1f msec\n",tA1*1.e3,tA2);
for (int j=0; j<ntasks; j++)
{
int svnum=0;
for (int i=0; i<ntraining; i++)
{
if(h_atraindata[j*ntraining + i]!=0)
{
svnum++;
}
}
printf("Task %i, svnum, %i, b %f\n",j, svnum,h_b[j] );
}
int nSV=0;
for (int i=0; i< ntraining; i++)
{
for (int j=0; j< ntasks; j++)
{
if(h_atraindata[j*ntraining+i]!=0)
{
nSV++;
break;
}
}
}
float* h_xtraindatared = (float*) malloc(sizeof(float) * nSV* nfeatures);
int* h_ltraindatared = (int*) malloc(sizeof(int) * nSV);
float* h_atraindatared = (float*) malloc(sizeof(float) *ntasks* nSV);
int k=0;
for (int i=0; i< ntraining; i++)
{
//Check if SV
bool isSV=false;
for (int j=0; j< ntasks; j++)
{
if(h_atraindata[j*ntraining+i]!=0)
{
isSV=true;
break;
}
}
//If SV copy sample and alphas
if(isSV)
{
for (int j=0; j< ntasks; j++)
{
h_atraindatared[j*nSV +k]= h_atraindata[j*ntraining+i];
}
for (int j=0; j<nfeatures; j++)
{
h_xtraindatared[j*nSV+k]=h_xtraindata[j*ntraining+i];
}
h_ltraindatared[k]= h_ltraindata[i];
k++;
}
}
printf("Testing classifier...\n");
cuResetTimer();
float tB1=cuGetTimer();
testingclassifier( h_xtraindatared,
h_xtestdata,
h_ltraindatared,
h_ltesthatdata,
h_rdata,
h_atraindatared,
nSV,
ntesting,
nfeatures,
nclasses,
ntasks,
h_b,
beta,
kernelcode);
printf("Testing classifier done\n");
float tB2=cuGetTimer();
printf("Testing time Launch =%.1f usec, finished=%.1f msec\n",tB1*1.e3,tB2);
int errors=0;
for (int i=0; i<ntesting; i++)
{
if( h_ltestdata[i]!=h_ltesthatdata[i])
{
errors++;
}
}
printf("# of testing samples %i, # errors %i, Rate %f\n", ntesting, errors, 100* (float) (ntesting -errors)/(float)ntesting);
free(h_rdata);
free(h_xtraindata);
free(h_xtestdata);
free(h_ltraindata);
free(h_ltestdata);
free(h_b);
free(h_atraindata);
free(h_xtraindatared);
free(h_ltraindatared);
free(h_atraindatared);
}
|
1ef75ae6100d828ebd1ca6c27306aaeae1734fdd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/layer_norm_op.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
// X = sqrt(X - Y^2 + epsilon)
__global__ void sqrtXMinusYSquaredKernel(
const int N,
float* x,
const float* y,
const float epsilon) {
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = sqrtf(x[i] - y[i] * y[i] + epsilon);
}
}
// out[i, j] = (X[i, j] - mu[i]) / sigma[i]
__global__ void normalizeKernel(
const int row_dim,
const int N,
const float* x,
const float* mu,
const float* sigma,
float* out) {
CUDA_1D_KERNEL_LOOP(i, N) {
out[i] = (x[i] - mu[i / row_dim]) / (sigma[i / row_dim]);
}
}
template <typename InputIterator_t>
void allocScratchAndReduce(
InputIterator_t input,
float* output,
int num_segments,
int* seg_indices,
Tensor* scratch,
hipStream_t stream) {
size_t temp_storage_bytes;
hipcub::DeviceSegmentedReduce::Sum(
nullptr, // To retrieve required temporary storage size
temp_storage_bytes, // size_t &temp_storage_bytes
input, // InputIteratorT d_i
output, // OutputIteratorT d_out
num_segments, // int num_segments
seg_indices, // int *d_begin_offsets
seg_indices + 1, // int *d_end_offsets
stream // hipStream_t stream=0
);
size_t temp_storage_floats = temp_storage_bytes / sizeof(float) +
(temp_storage_bytes % sizeof(float) ? 1 : 0);
scratch->Resize(vector<size_t>{temp_storage_floats});
hipcub::DeviceSegmentedReduce::Sum(
scratch->template mutable_data<float>(), // temporary storage allocated above
temp_storage_bytes, // size_t &temp_storage_bytes
input, // InputIteratorT d_i
output, // OutputIteratorT d_out
num_segments, // int num_segments
seg_indices, // int *d_begin_offsets
seg_indices + 1, // int *d_end_offsets
stream // hipStream_t stream=0
);
}
} // namespace
template <>
template <>
bool LayerNormOp<CUDAContext>::DoRunWithType<float>() {
const auto& input = Input(0);
auto* output = Output(0);
auto* mean = Output(1);
auto* stdev = Output(2);
CAFFE_ENFORCE_GE(input.dims().size(), 2, "LayerNorm requires input dim >= 2");
const auto canonical_axis = input.canonical_axis_index(axis_);
const int left = input.size_to_dim(canonical_axis);
const int right = input.size_from_dim(canonical_axis);
output->ResizeLike(input);
std::vector<TIndex> stats_dims(
input.dims().begin(), input.dims().begin() + canonical_axis);
stats_dims.push_back(1);
mean->Resize(stats_dims);
stdev->Resize(stats_dims);
std::vector<int> segs(left + 1);
std::iota(segs.begin(), segs.end(), 0);
std::transform(
segs.begin(),
segs.end(),
segs.begin(),
std::bind1st(std::multiplies<int>(), right));
seg_indices_.Resize(vector<size_t>{segs.size()});
context_.CopyBytesFromCPU(
sizeof(int) * segs.size(),
static_cast<void*>(segs.data()),
static_cast<void*>(seg_indices_.mutable_data<int>()));
if (right == 1) {
mean->CopyFrom(input);
mean->Resize(stats_dims);
math::Set<float, CUDAContext>(
left, sqrtf(epsilon_), stdev->mutable_data<float>(), &context_);
} else {
// Calculate row-wise means
// First stage: sum up feature vectors
allocScratchAndReduce(
input.data<float>(),
mean->mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// Second stage: Normalize by feature vector dim
math::Scale<float, CUDAContext>(
left,
1.0f / right,
mean->mutable_data<float>(),
mean->mutable_data<float>(),
&context_);
// Calculate row-wise standard deviation
// First stage: sum up row-wise squared values
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
input.data<float>(), transform);
allocScratchAndReduce(
it,
stdev->mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// Second stage: Normalize by feature vector dim
math::Scale<float, CUDAContext>(
left,
1.0f / right,
stdev->mutable_data<float>(),
stdev->mutable_data<float>(),
&context_);
// stddev = sqrt(E(x^2) - E(x)^2 + epsilon)
hipLaunchKernelGGL(( sqrtXMinusYSquaredKernel),
dim3(CAFFE_GET_BLOCKS(left)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
stdev->mutable_data<float>(),
mean->mutable_data<float>(),
epsilon_);
}
// out[i, j] = (in[i,j] - mu[i]) / (sigma[i])
hipLaunchKernelGGL(( normalizeKernel),
dim3(CAFFE_GET_BLOCKS(left)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
right,
left * right,
input.data<float>(),
mean->data<float>(),
stdev->data<float>(),
output->mutable_data<float>());
return true;
}
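// Sanity check of the identity used above, stddev = sqrt(E[x^2] - E[x]^2 + eps):
// for a row x = [1, 2, 3], E[x] = 2 and E[x^2] = 14/3, so E[x^2] - E[x]^2 = 2/3,
// which matches the direct variance ((1-2)^2 + (2-2)^2 + (3-2)^2) / 3 = 2/3.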
REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>);
namespace {
// x : [N, D]
// y : [N, 1]
// z : [N, D]
// (x - broadcast(y)) * z
__global__ void zTimesXminusYbroadcast(
int N,
int D,
const float* x,
const float* y,
const float* z,
float* out) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
out[i] = (x[i] - y[i / D]) * z[i];
}
}
__global__ void normalizeByNegStdev(
int N,
bool var,
const float* x,
const float* stdev,
float* out) {
if (var) {
CUDA_1D_KERNEL_LOOP(i, N) {
out[i] = (-1.0f * x[i]) / (stdev[i] * stdev[i]);
}
} else {
CUDA_1D_KERNEL_LOOP(i, N) {
out[i] = (-1.0f * x[i]) / (stdev[i]);
}
}
}
__global__ void gradientMegaKernel(
int N,
int D,
const float* stdev,
const float* X,
const float* dstdev,
const float* dmean,
const float* dout,
float* out) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
out[i] = 1.0f / stdev[i / D] * dout[i] +
X[i] / (D * stdev[i / D]) * dstdev[i / D] + 1.0f / D * dmean[i / D];
}
}
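// In other words, for row i and feature j (D features per row):
// dX[i][j] = dout[i][j] / stdev[i] + X[i][j] * dstdev[i] / (D * stdev[i])
//            + dmean[i] / D,
// broadcasting the per-row statistics across all D features of the row.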
#define PRINT(X, N, D) printTensor >> (X, N, D)
} // namespace
template <>
template <>
bool LayerNormGradientOp<CUDAContext>::DoRunWithType<float>() {
const auto& dout = Input(0);
const auto& norm_outputs = Input(1);
const auto& means = Input(2);
const auto& stdev = Input(3);
const auto& norm_inputs = Input(4);
auto* ginput = Output(0);
const auto canonical_axis = norm_inputs.canonical_axis_index(axis_);
const unsigned long left = norm_inputs.size_to_dim(canonical_axis);
const unsigned long right = norm_inputs.size_from_dim(canonical_axis);
ginput->ResizeLike(norm_inputs);
std::vector<TIndex> stats_dims(
norm_inputs.dims().begin(), norm_inputs.dims().begin() + canonical_axis);
stats_dims.push_back(1);
dmean_.Resize(stats_dims);
dstdev_.Resize(stats_dims);
gscratch_.Resize(std::vector<size_t>{left, right});
std::vector<int> segs(left + 1);
std::iota(segs.begin(), segs.end(), 0);
std::transform(
segs.begin(),
segs.end(),
segs.begin(),
std::bind1st(std::multiplies<int>(), right));
seg_indices_.Resize(vector<size_t>{segs.size()});
context_.CopyBytesFromCPU(
sizeof(int) * segs.size(),
static_cast<void*>(segs.data()),
static_cast<void*>(seg_indices_.mutable_data<int>()));
// Calculate gradient of the standard deviation
// temp1 = (x - mean) * dout
hipLaunchKernelGGL(( zTimesXminusYbroadcast),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
norm_inputs.data<float>(),
means.data<float>(),
dout.data<float>(),
gscratch_.mutable_data<float>());
dstdev_.Resize(vector<size_t>{left, 1});
// dstdev = reduce(temp1)
allocScratchAndReduce(
gscratch_.data<float>(),
dstdev_.mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// dstdev = -dstdev / stdev^2
hipLaunchKernelGGL(( normalizeByNegStdev),
dim3(CAFFE_GET_BLOCKS(left)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
true,
dstdev_.data<float>(),
stdev.data<float>(),
dstdev_.mutable_data<float>());
// Calculate gradient of the mean
// dmean = reduce(dout)
allocScratchAndReduce(
dout.data<float>(),
dmean_.mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// mean * stdev
math::Mul(
left,
means.data<float>(),
dstdev_.data<float>(),
gscratch_.mutable_data<float>(),
&context_);
// [\sum dout] + mean * stdev
math::Add(
left,
dmean_.data<float>(),
gscratch_.data<float>(),
dmean_.mutable_data<float>(),
&context_);
// -1 / std * [[\sum dout] + mean * stdev]
hipLaunchKernelGGL(( normalizeByNegStdev),
dim3(CAFFE_GET_BLOCKS(left)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
false,
dmean_.data<float>(),
stdev.data<float>(),
dmean_.mutable_data<float>());
// Calculate gradient of input
hipLaunchKernelGGL(( gradientMegaKernel),
dim3(CAFFE_GET_BLOCKS(left)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
stdev.data<float>(),
norm_inputs.data<float>(),
dstdev_.data<float>(),
dmean_.data<float>(),
dout.data<float>(),
ginput->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>);
} // namespace caffe2
|
1ef75ae6100d828ebd1ca6c27306aaeae1734fdd.cu
|
#include "caffe2/operators/layer_norm_op.h"
#include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
// X = sqrt(X - Y^2 + epsilon)
__global__ void sqrtXMinusYSquaredKernel(
const int N,
float* x,
const float* y,
const float epsilon) {
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = sqrtf(x[i] - y[i] * y[i] + epsilon);
}
}
// out[i, j] = (X[i, j] - mu[i]) / sigma[i]
__global__ void normalizeKernel(
const int row_dim,
const int N,
const float* x,
const float* mu,
const float* sigma,
float* out) {
CUDA_1D_KERNEL_LOOP(i, N) {
out[i] = (x[i] - mu[i / row_dim]) / (sigma[i / row_dim]);
}
}
template <typename InputIterator_t>
void allocScratchAndReduce(
InputIterator_t input,
float* output,
int num_segments,
int* seg_indices,
Tensor* scratch,
cudaStream_t stream) {
size_t temp_storage_bytes;
cub::DeviceSegmentedReduce::Sum(
nullptr, // To retrieve required temporary storage size
temp_storage_bytes, // size_t &temp_storage_bytes
input, // InputIteratorT d_i
output, // OutputIteratorT d_out
num_segments, // int num_segments
seg_indices, // int *d_begin_offsets
seg_indices + 1, // int *d_end_offsets
stream // cudaStream_t stream=0
);
size_t temp_storage_floats = temp_storage_bytes / sizeof(float) +
(temp_storage_bytes % sizeof(float) ? 1 : 0);
scratch->Resize(vector<size_t>{temp_storage_floats});
cub::DeviceSegmentedReduce::Sum(
scratch->template mutable_data<float>(), // Temporary storage allocated above
temp_storage_bytes, // size_t &temp_storage_bytes
input, // InputIteratorT d_i
output, // OutputIteratorT d_out
num_segments, // int num_segments
seg_indices, // int *d_begin_offsets
seg_indices + 1, // int *d_end_offsets
stream // cudaStream_t stream=0
);
}
} // namespace
template <>
template <>
bool LayerNormOp<CUDAContext>::DoRunWithType<float>() {
const auto& input = Input(0);
auto* output = Output(0);
auto* mean = Output(1);
auto* stdev = Output(2);
CAFFE_ENFORCE_GE(input.dims().size(), 2, "LayerNorm requires input dim >= 2");
const auto canonical_axis = input.canonical_axis_index(axis_);
const int left = input.size_to_dim(canonical_axis);
const int right = input.size_from_dim(canonical_axis);
output->ResizeLike(input);
std::vector<TIndex> stats_dims(
input.dims().begin(), input.dims().begin() + canonical_axis);
stats_dims.push_back(1);
mean->Resize(stats_dims);
stdev->Resize(stats_dims);
std::vector<int> segs(left + 1);
std::iota(segs.begin(), segs.end(), 0);
std::transform(
segs.begin(),
segs.end(),
segs.begin(),
std::bind1st(std::multiplies<int>(), right));
seg_indices_.Resize(vector<size_t>{segs.size()});
context_.CopyBytesFromCPU(
sizeof(int) * segs.size(),
static_cast<void*>(segs.data()),
static_cast<void*>(seg_indices_.mutable_data<int>()));
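// Degenerate case: with one element per row the mean equals the input and the stdev collapses to sqrt(epsilon)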
if (right == 1) {
mean->CopyFrom(input);
mean->Resize(stats_dims);
math::Set<float, CUDAContext>(
left, sqrtf(epsilon_), stdev->mutable_data<float>(), &context_);
} else {
// Calculate row-wise means
// First stage: sum up feature vectors
allocScratchAndReduce(
input.data<float>(),
mean->mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// Second stage: Normalize by feature vector dim
math::Scale<float, CUDAContext>(
left,
1.0f / right,
mean->mutable_data<float>(),
mean->mutable_data<float>(),
&context_);
// Calculate row-wise standard deviation
// First stage: sum up row-wise squared values
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
input.data<float>(), transform);
allocScratchAndReduce(
it,
stdev->mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// Second stage: Normalize by feature vector dim
math::Scale<float, CUDAContext>(
left,
1.0f / right,
stdev->mutable_data<float>(),
stdev->mutable_data<float>(),
&context_);
// stddev = sqrt(E(x^2) - E(x)^2 + epsilon)
sqrtXMinusYSquaredKernel<<<
CAFFE_GET_BLOCKS(left),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
stdev->mutable_data<float>(),
mean->mutable_data<float>(),
epsilon_);
}
// out[i, j] = (in[i,j] - mu[i]) / (sigma[i])
normalizeKernel<<<
CAFFE_GET_BLOCKS(left),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
right,
left * right,
input.data<float>(),
mean->data<float>(),
stdev->data<float>(),
output->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>);
namespace {
// x : [N, D]
// y : [N, 1]
// z : [N, D]
// (x - broadcast(y)) * z
__global__ void zTimesXminusYbroadcast(
int N,
int D,
const float* x,
const float* y,
const float* z,
float* out) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
out[i] = (x[i] - y[i / D]) * z[i];
}
}
__global__ void normalizeByNegStdev(
int N,
bool var,
const float* x,
const float* stdev,
float* out) {
if (var) {
CUDA_1D_KERNEL_LOOP(i, N) {
out[i] = (-1.0f * x[i]) / (stdev[i] * stdev[i]);
}
} else {
CUDA_1D_KERNEL_LOOP(i, N) {
out[i] = (-1.0f * x[i]) / (stdev[i]);
}
}
}
__global__ void gradientMegaKernel(
int N,
int D,
const float* stdev,
const float* X,
const float* dstdev,
const float* dmean,
const float* dout,
float* out) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
out[i] = 1.0f / stdev[i / D] * dout[i] +
X[i] / (D * stdev[i / D]) * dstdev[i / D] + 1.0f / D * dmean[i / D];
}
}
#define PRINT(X, N, D) printTensor >> (X, N, D)
} // namespace
template <>
template <>
bool LayerNormGradientOp<CUDAContext>::DoRunWithType<float>() {
const auto& dout = Input(0);
const auto& norm_outputs = Input(1);
const auto& means = Input(2);
const auto& stdev = Input(3);
const auto& norm_inputs = Input(4);
auto* ginput = Output(0);
const auto canonical_axis = norm_inputs.canonical_axis_index(axis_);
const unsigned long left = norm_inputs.size_to_dim(canonical_axis);
const unsigned long right = norm_inputs.size_from_dim(canonical_axis);
ginput->ResizeLike(norm_inputs);
std::vector<TIndex> stats_dims(
norm_inputs.dims().begin(), norm_inputs.dims().begin() + canonical_axis);
stats_dims.push_back(1);
dmean_.Resize(stats_dims);
dstdev_.Resize(stats_dims);
gscratch_.Resize(std::vector<size_t>{left, right});
std::vector<int> segs(left + 1);
std::iota(segs.begin(), segs.end(), 0);
std::transform(
segs.begin(),
segs.end(),
segs.begin(),
std::bind1st(std::multiplies<int>(), right));
seg_indices_.Resize(vector<size_t>{segs.size()});
context_.CopyBytesFromCPU(
sizeof(int) * segs.size(),
static_cast<void*>(segs.data()),
static_cast<void*>(seg_indices_.mutable_data<int>()));
// Calculate gradient of the standard deviation
// temp1 = (x - mean) * dout
zTimesXminusYbroadcast<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
norm_inputs.data<float>(),
means.data<float>(),
dout.data<float>(),
gscratch_.mutable_data<float>());
dstdev_.Resize(vector<size_t>{left, 1});
// dstdev = reduce(temp1)
allocScratchAndReduce(
gscratch_.data<float>(),
dstdev_.mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// dstdev = -dstdev / stdev^2
normalizeByNegStdev<<<
CAFFE_GET_BLOCKS(left),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
true,
dstdev_.data<float>(),
stdev.data<float>(),
dstdev_.mutable_data<float>());
// Calculate gradient of the mean
// dmean = reduce(dout)
allocScratchAndReduce(
dout.data<float>(),
dmean_.mutable_data<float>(),
left,
seg_indices_.mutable_data<int>(),
&scratch_,
context_.cuda_stream());
// mean * stdev
math::Mul(
left,
means.data<float>(),
dstdev_.data<float>(),
gscratch_.mutable_data<float>(),
&context_);
// [\sum dout] + mean * stdev
math::Add(
left,
dmean_.data<float>(),
gscratch_.data<float>(),
dmean_.mutable_data<float>(),
&context_);
// -1 / std * [[\sum dout] + mean * stdev]
normalizeByNegStdev<<<
CAFFE_GET_BLOCKS(left),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
false,
dmean_.data<float>(),
stdev.data<float>(),
dmean_.mutable_data<float>());
// Calculate gradient of input
gradientMegaKernel<<<
CAFFE_GET_BLOCKS(left),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
stdev.data<float>(),
norm_inputs.data<float>(),
dstdev_.data<float>(),
dmean_.data<float>(),
dout.data<float>(),
ginput->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>);
} // namespace caffe2
|
0d17693115f1561d67aabe450d4998840a3eb8b2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define IN_spa
// #include <hipcub/hipcub.hpp>
// #include "gpu.cuh"
// #include "advance_p_gpu.cuh"
// #include "gpu_util_hip.cuh"
#ifdef __USE_LESS_
#error "this is useless"
__device__ int move_p_gpu(particle_t *p, particle_t *p_global, particle_mover_t *pm,
float *a, const int64_t *g_neighbor,
int64_t g_rangel, int64_t g_rangeh, const float qsp) {
float s_midx, s_midy, s_midz;
float s_dispx, s_dispy, s_dispz;
float s_dir[3];
float v0, v1, v2, v3, v4, v5, q;
int axis, face;
int64_t neighbor;
q = qsp * p->w;
for (;;) {
s_midx = p->dx;
s_midy = p->dy;
s_midz = p->dz;
s_dispx = pm->dispx;
s_dispy = pm->dispy;
s_dispz = pm->dispz;
s_dir[0] = (s_dispx > 0.0f) ? 1.0f : -1.0f;
s_dir[1] = (s_dispy > 0.0f) ? 1.0f : -1.0f;
s_dir[2] = (s_dispz > 0.0f) ? 1.0f : -1.0f;
// Compute twice the fractional distance to each potential
// streak/cell face intersection.
v0 = (s_dispx == 0.0f) ? 3.4e38f : (s_dir[0] - s_midx) / s_dispx;
v1 = (s_dispy == 0.0f) ? 3.4e38f : (s_dir[1] - s_midy) / s_dispy;
v2 = (s_dispz == 0.0f) ? 3.4e38f : (s_dir[2] - s_midz) / s_dispz;
// Determine the fractional length and axis of current streak. The
// streak ends on either the first face intersected by the
// particle track or at the end of the particle track.
//
// axis 0,1 or 2 ... streak ends on a x,y or z-face respectively
// axis 3 ... streak ends at end of the particle track
v3 = 2.0f, axis = 3;
if (v0 < v3) v3 = v0, axis = 0;
if (v1 < v3) v3 = v1, axis = 1;
if (v2 < v3) v3 = v2, axis = 2;
v3 *= 0.5f;
// Compute the midpoint and the normalized displacement of the streak
s_dispx *= v3;
s_dispy *= v3;
s_dispz *= v3;
s_midx += s_dispx;
s_midy += s_dispy;
s_midz += s_dispz;
// Accumulate the streak. Note: accumulator values are 4 times
// the total physical charge that passed through the appropriate
// current quadrant in a time-step
v5 = q * s_dispx * s_dispy * s_dispz * (1. / 3.);
#define accumulate_j(X, Y, Z) \
v4 = q * s_disp##X; /* v2 = q ux */ \
v1 = v4 * s_mid##Y; /* v1 = q ux dy */ \
v0 = v4 - v1; /* v0 = q ux (1-dy) */ \
v1 += v4; /* v1 = q ux (1+dy) */ \
v4 = 1 + s_mid##Z; /* v4 = 1+dz */ \
v2 = v0 * v4; /* v2 = q ux (1-dy)(1+dz) */ \
v3 = v1 * v4; /* v3 = q ux (1+dy)(1+dz) */ \
v4 = 1 - s_mid##Z; /* v4 = 1-dz */ \
v0 *= v4; /* v0 = q ux (1-dy)(1-dz) */ \
v1 *= v4; /* v1 = q ux (1+dy)(1-dz) */ \
v0 += v5; /* v0 = q ux [ (1-dy)(1-dz) + uy*uz/3 ] */ \
v1 -= v5; /* v1 = q ux [ (1+dy)(1-dz) - uy*uz/3 ] */ \
v2 -= v5; /* v2 = q ux [ (1-dy)(1+dz) - uy*uz/3 ] */ \
v3 += v5; /* v3 = q ux [ (1+dy)(1+dz) + uy*uz/3 ] */ \
a[0] += v0; \
a[1] += v1; \
a[2] += v2; \
a[3] += v3
accumulate_j(x, y, z);
a += 4;
accumulate_j(y, z, x);
a += 4;
accumulate_j(z, x, y);
#undef accumulate_j
// Compute the remaining particle displacement
pm->dispx -= s_dispx;
pm->dispy -= s_dispy;
pm->dispz -= s_dispz;
// Compute the new particle offset
p->dx += s_dispx + s_dispx;
p->dy += s_dispy + s_dispy;
p->dz += s_dispz + s_dispz;
// If an end streak, return success (should be ~50% of the time)
if (axis == 3) break;
// Determine if the particle crossed into a local cell or if it
// hit a boundary and convert the coordinate system accordingly.
// Note: Crossing into a local cell should happen ~50% of the
// time; hitting a boundary is usually a rare event. Note: the
// entry / exit coordinate for the particle is guaranteed to be
// +/-1 _exactly_ for the particle.
v0 = s_dir[axis];
(&(p->dx))[axis] = v0; // Avoid roundoff fiascos--put the particle
// _exactly_ on the boundary.
face = axis;
if (v0 > 0) face += 3;
neighbor = g_neighbor[6 * p->i + face];
if (UNLIKELY(neighbor == reflect_particles)) {
// Hit a reflecting boundary condition. Reflect the particle
// momentum and remaining displacement and keep moving the
// particle.
(&(p->ux))[axis] = -(&(p->ux))[axis];
(&(pm->dispx))[axis] = -(&(pm->dispx))[axis];
continue;
}
if (UNLIKELY(neighbor < g_rangel || neighbor > g_rangeh)) {
// Cannot handle the boundary condition here. Save the updated
// particle position, face it hit and update the remaining
// displacement in the particle mover.
p->i = 8 * p->i + face;
return 1; // Return "mover still in use"
}
// Crossed into a normal voxel. Update the voxel index, convert the
// particle coordinate system and keep moving the particle.
p->i = neighbor - g_rangel; // Compute local index of neighbor
(&(p->dx))[axis] = -v0; // Convert coordinate system
}
return 0; // Return "mover not in use"
}
#define timer_start(elt) \
int timer_##elt = clock();
#define timer_end(elt) \
timer_##elt = clock() - timer_##elt; \
if (threadIdx.x == 0) printf(#elt ": %d\n", timer_##elt);
// Total __shared__ size = 11298 * sizeof(int)
// #define BLOCK_SIZE 512
#define SHARE_MAX_VOXEL_SIZE 2 // 18
#define SHARE_MAX_PM_SIZE 10 // > (1024 * 0.2%) // 10 * 4
__inline__ __device__ int warpAllReduceSum(int val) {
for (int mask = warpSize / 2; mask > 0; mask /= 2)
val += __shfl_xor(val, mask);
return val;
}
__global__ void advance_p_gpu(advance_p_gpu_args args) {
const int block_rank = blockIdx.x;
const int n_block = gridDim.x;
const int thread_rank = threadIdx.x;
const int n_thread = blockDim.x;
const int block_size = args.block_size;
const float qdt_2mc = args.qdt_2mc;
const float cdt_dx = args.cdt_dx;
const float cdt_dy = args.cdt_dy;
const float cdt_dz = args.cdt_dz;
const float qsp = args.qsp;
const float one = 1.0;
const float one_third = 1.0 / 3.0;
const float two_fifteenths = 2.0 / 15.0;
float dx, dy, dz, ux, uy, uz, q;
float hax, hay, haz, cbx, cby, cbz;
float v0, v1, v2, v3, v4, v5;
float a[SHARE_MAX_VOXEL_SIZE][12] = {0};
int itmp, n, nm, max_nm;
particle_mover_t local_pm;
__shared__ int f_shared_index[2];
interpolator_t f_shared[2]; // assume only 2 kinds of voxels
GPU_DISTRIBUTE(args.np, block_size, block_rank, itmp, n);
particle_t *p_global = args.p0 + itmp;
const interpolator_t *f_global = args.f0;
// if (thread_rank == 0) {
// particle_t p = p_global[0];
// f_shared_index[0] = p.i;
// f_shared[0] = f_global[p.i];
// }
// if (thread_rank == 1) {
// particle_t p = p_global[n - 1];
// f_shared_index[1] = p.i;
// f_shared[1] = f_global[p.i];
// }
__syncthreads();
int ii = 0;
interpolator_t f;
int cnt = 0;
for (int i = 0; i < n; i += n_thread) {
if (thread_rank < n) {
particle_t p = p_global[i + thread_rank];
interpolator_t f = f_global[p.i];
// if (f_shared_index[ii] != p.i){
// cnt ++;
// if(cnt > 2)printf("%d\n", cnt);
// while(ii < SHARE_MAX_VOXEL_SIZE && f_shared_index[ii] != p.i) ii++;
// f = f_shared[ii];
// }
dx = p.dx; // Load position
dy = p.dy;
dz = p.dz;
hax = qdt_2mc *
((f.ex + dy * f.dexdy) + dz * (f.dexdz + dy * f.d2exdydz));
hay = qdt_2mc *
((f.ey + dz * f.deydz) + dx * (f.deydx + dz * f.d2eydzdx));
haz = qdt_2mc *
((f.ez + dx * f.dezdx) + dy * (f.dezdy + dx * f.d2ezdxdy));
cbx = f.cbx + dx * f.dcbxdx; // Interpolate B
cby = f.cby + dy * f.dcbydy;
cbz = f.cbz + dz * f.dcbzdz;
ux = p.ux; // Load momentum
uy = p.uy;
uz = p.uz;
q = p.w;
ux += hax; // Half advance E
uy += hay;
uz += haz;
v0 = qdt_2mc /
sqrtf(one + (ux * ux + (uy * uy + uz * uz)));
// Boris - scalars
v1 = cbx * cbx + (cby * cby + cbz * cbz);
v2 = (v0 * v0) * v1;
v3 = v0 * (one + v2 * (one_third + v2 * two_fifteenths));
v4 = v3 / (one + v1 * (v3 * v3));
v4 += v4;
v0 = ux + v3 * (uy * cbz - uz * cby); // Boris - uprime
v1 = uy + v3 * (uz * cbx - ux * cbz);
v2 = uz + v3 * (ux * cby - uy * cbx);
ux += v4 * (v1 * cbz - v2 * cby); // Boris - rotation
uy += v4 * (v2 * cbx - v0 * cbz);
uz += v4 * (v0 * cby - v1 * cbx);
ux += hax; // Half advance E
uy += hay;
uz += haz;
p.ux = ux; // Store momentum
p.uy = uy;
p.uz = uz;
v0 = one / sqrtf(one + (ux * ux + (uy * uy + uz * uz)));
// Get norm displacement
ux *= cdt_dx;
uy *= cdt_dy;
uz *= cdt_dz;
ux *= v0;
uy *= v0;
uz *= v0;
v0 = dx + ux; // Streak midpoint (inbnds)
v1 = dy + uy;
v2 = dz + uz;
v3 = v0 + ux; // New position
v4 = v1 + uy;
v5 = v2 + uz;
// FIXME-KJB: COULD SHORT CIRCUIT ACCUMULATION IN THE CASE WHERE QSP==0!
if (v3 <= one && v4 <= one && v5 <= one && // Check if inbnds
-v3 <= one && -v4 <= one && -v5 <= one) {
q *= qsp;
p.dx = v3; // Store new position
p.dy = v4;
p.dz = v5;
dx = v0; // Streak midpoint
dy = v1;
dz = v2;
v5 = q * ux * uy * uz * one_third; // Compute correction
#define ACCUMULATE_J(X, Y, Z, offset) \
v4 = q * u##X; /* v2 = q ux */ \
v1 = v4 * d##Y; /* v1 = q ux dy */ \
v0 = v4 - v1; /* v0 = q ux (1-dy) */ \
v1 += v4; /* v1 = q ux (1+dy) */ \
v4 = one + d##Z; /* v4 = 1+dz */ \
v2 = v0 * v4; /* v2 = q ux (1-dy)(1+dz) */ \
v3 = v1 * v4; /* v3 = q ux (1+dy)(1+dz) */ \
v4 = one - d##Z; /* v4 = 1-dz */ \
v0 *= v4; /* v0 = q ux (1-dy)(1-dz) */ \
v1 *= v4; /* v1 = q ux (1+dy)(1-dz) */ \
v0 += v5; /* v0 = q ux [ (1-dy)(1-dz) + uy*uz/3 ] */ \
v1 -= v5; /* v1 = q ux [ (1+dy)(1-dz) - uy*uz/3 ] */ \
v2 -= v5; /* v2 = q ux [ (1-dy)(1+dz) - uy*uz/3 ] */ \
v3 += v5; /* v3 = q ux [ (1+dy)(1+dz) + uy*uz/3 ] */ \
a[ii][offset + 0] = v0; \
a[ii][offset + 1] = v1; \
a[ii][offset + 2] = v2; \
a[ii][offset + 3] = v3;
// timer_start(save_to_local_a); // 9920
ACCUMULATE_J(x, y, z, 0);
ACCUMULATE_J(y, z, x, 4);
ACCUMULATE_J(z, x, y, 8);
// timer_end(save_to_local_a);
#undef ACCUMULATE_J
}
else {
local_pm.dispx = ux;
local_pm.dispy = uy;
local_pm.dispz = uz;
local_pm.i = itmp + i + thread_rank;
// __device__ int move_p_gpu(particle_t *p0, particle_mover_t *pm,
// accumulator_t *a0, const int64_t *g_neighbor,
// int64_t g_rangel, int64_t g_rangeh, const float qsp) {
/*if (move_p_gpu(p0, &local_pm, a0, g, qsp)) // Unlikely
{
if (nm < max_nm) {
pm[nm++] = local_pm[0];
}
else {
itmp++; // Unlikely
}
}*/
}
p_global[i + thread_rank] = p;
}
}
__syncthreads();
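// Warp-level reduction: each lane's my_a is summed with WarpReduce, lane 0 stores the aggregate in res,
// and the first 12 threads atomicAdd the per-voxel totals into the global accumulator
// (note: my_a is never filled from the local a[][] above, so this disabled path is incomplete)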
typedef hipcub::WarpReduce<float> WarpReduce;
__shared__ typename WarpReduce::TempStorage temp_storage;
float my_a[SHARE_MAX_VOXEL_SIZE][12];
__shared__ float res[SHARE_MAX_VOXEL_SIZE][12];
#pragma unroll
for (int i = 0; i < SHARE_MAX_VOXEL_SIZE; i++) {
#pragma unroll
for (int j = 0; j < 12; j++) {
float aggregate = WarpReduce(temp_storage).Sum(my_a[i][j]);
if (thread_rank == 0) res[i][j] = aggregate;
__syncthreads();
}
}
accumulator_t *a_global = args.a0;
for (int i = 0; i < SHARE_MAX_VOXEL_SIZE; i++) {
if (thread_rank < 12) {
float real_a = res[i][thread_rank];
atomicAdd(((float *)a_global + f_shared_index[i]) + thread_rank,
real_a);
}
}
/* int my_a[SHARE_MAX_VOXEL_SIZE][12];
__shared__ int res[SHARE_MAX_VOXEL_SIZE][12];
const float two_p_28 = (float)(1<<28);
#pragma unroll
for(int i = 0;i < SHARE_MAX_VOXEL_SIZE; i++){
#pragma unroll
for(int j = 0;j < 12;j++){
my_a[i][j] = a[i][j] * two_p_28;
}
}
#pragma unroll
for(int i = 0;i < SHARE_MAX_VOXEL_SIZE; i++){
#pragma unroll
for(int j = 0;j < 12;j++){
// float aggregate = WarpReduce(temp_storage).Sum(my_a[i][j]);
int aggregate = warpAllReduceSum(my_a[i][j]);
if(thread_rank == 0) res[i][j] = aggregate;
__syncthreads();
}
}
accumulator_t *a_global = args.a0;
for(int i = 0;i < SHARE_MAX_VOXEL_SIZE; i++){
if (thread_rank < 12) {
float real_a = (float)(res[i][thread_rank] >> 28);
atomicAdd( ((float *)a_global + f_shared_index[i]) + thread_rank ,
real_a );
}
}*/
}
#endif // __useless
|
0d17693115f1561d67aabe450d4998840a3eb8b2.cu
|
#define IN_spa
// #include <cub/cub.cuh>
// #include "gpu.cuh"
// #include "advance_p_gpu.cuh"
// #include "gpu_util.cuh"
#ifdef __USE_LESS_
#error "this is useless"
__device__ int move_p_gpu(particle_t *p, particle_t *p_global, particle_mover_t *pm,
float *a, const int64_t *g_neighbor,
int64_t g_rangel, int64_t g_rangeh, const float qsp) {
float s_midx, s_midy, s_midz;
float s_dispx, s_dispy, s_dispz;
float s_dir[3];
float v0, v1, v2, v3, v4, v5, q;
int axis, face;
int64_t neighbor;
q = qsp * p->w;
for (;;) {
s_midx = p->dx;
s_midy = p->dy;
s_midz = p->dz;
s_dispx = pm->dispx;
s_dispy = pm->dispy;
s_dispz = pm->dispz;
s_dir[0] = (s_dispx > 0.0f) ? 1.0f : -1.0f;
s_dir[1] = (s_dispy > 0.0f) ? 1.0f : -1.0f;
s_dir[2] = (s_dispz > 0.0f) ? 1.0f : -1.0f;
// Compute twice the fractional distance to each potential
// streak/cell face intersection.
v0 = (s_dispx == 0.0f) ? 3.4e38f : (s_dir[0] - s_midx) / s_dispx;
v1 = (s_dispy == 0.0f) ? 3.4e38f : (s_dir[1] - s_midy) / s_dispy;
v2 = (s_dispz == 0.0f) ? 3.4e38f : (s_dir[2] - s_midz) / s_dispz;
// Determine the fractional length and axis of current streak. The
// streak ends on either the first face intersected by the
// particle track or at the end of the particle track.
//
// axis 0,1 or 2 ... streak ends on a x,y or z-face respectively
// axis 3 ... streak ends at end of the particle track
v3 = 2.0f, axis = 3;
if (v0 < v3) v3 = v0, axis = 0;
if (v1 < v3) v3 = v1, axis = 1;
if (v2 < v3) v3 = v2, axis = 2;
v3 *= 0.5f;
// Compute the midpoint and the normalized displacement of the streak
s_dispx *= v3;
s_dispy *= v3;
s_dispz *= v3;
s_midx += s_dispx;
s_midy += s_dispy;
s_midz += s_dispz;
// Accumulate the streak. Note: accumulator values are 4 times
// the total physical charge that passed through the appropriate
// current quadrant in a time-step
v5 = q * s_dispx * s_dispy * s_dispz * (1. / 3.);
#define accumulate_j(X, Y, Z) \
v4 = q * s_disp##X; /* v2 = q ux */ \
v1 = v4 * s_mid##Y; /* v1 = q ux dy */ \
v0 = v4 - v1; /* v0 = q ux (1-dy) */ \
v1 += v4; /* v1 = q ux (1+dy) */ \
v4 = 1 + s_mid##Z; /* v4 = 1+dz */ \
v2 = v0 * v4; /* v2 = q ux (1-dy)(1+dz) */ \
v3 = v1 * v4; /* v3 = q ux (1+dy)(1+dz) */ \
v4 = 1 - s_mid##Z; /* v4 = 1-dz */ \
v0 *= v4; /* v0 = q ux (1-dy)(1-dz) */ \
v1 *= v4; /* v1 = q ux (1+dy)(1-dz) */ \
v0 += v5; /* v0 = q ux [ (1-dy)(1-dz) + uy*uz/3 ] */ \
v1 -= v5; /* v1 = q ux [ (1+dy)(1-dz) - uy*uz/3 ] */ \
v2 -= v5; /* v2 = q ux [ (1-dy)(1+dz) - uy*uz/3 ] */ \
v3 += v5; /* v3 = q ux [ (1+dy)(1+dz) + uy*uz/3 ] */ \
a[0] += v0; \
a[1] += v1; \
a[2] += v2; \
a[3] += v3
accumulate_j(x, y, z);
a += 4;
accumulate_j(y, z, x);
a += 4;
accumulate_j(z, x, y);
#undef accumulate_j
// Compute the remaining particle displacement
pm->dispx -= s_dispx;
pm->dispy -= s_dispy;
pm->dispz -= s_dispz;
// Compute the new particle offset
p->dx += s_dispx + s_dispx;
p->dy += s_dispy + s_dispy;
p->dz += s_dispz + s_dispz;
// If an end streak, return success (should be ~50% of the time)
if (axis == 3) break;
// Determine if the particle crossed into a local cell or if it
// hit a boundary and convert the coordinate system accordingly.
// Note: Crossing into a local cell should happen ~50% of the
// time; hitting a boundary is usually a rare event. Note: the
// entry / exit coordinate for the particle is guaranteed to be
// +/-1 _exactly_ for the particle.
v0 = s_dir[axis];
(&(p->dx))[axis] = v0; // Avoid roundoff fiascos--put the particle
// _exactly_ on the boundary.
face = axis;
if (v0 > 0) face += 3;
neighbor = g_neighbor[6 * p->i + face];
if (UNLIKELY(neighbor == reflect_particles)) {
// Hit a reflecting boundary condition. Reflect the particle
// momentum and remaining displacement and keep moving the
// particle.
(&(p->ux))[axis] = -(&(p->ux))[axis];
(&(pm->dispx))[axis] = -(&(pm->dispx))[axis];
continue;
}
if (UNLIKELY(neighbor < g_rangel || neighbor > g_rangeh)) {
// Cannot handle the boundary condition here. Save the updated
// particle position, face it hit and update the remaining
// displacement in the particle mover.
p->i = 8 * p->i + face;
return 1; // Return "mover still in use"
}
// Crossed into a normal voxel. Update the voxel index, convert the
// particle coordinate system and keep moving the particle.
p->i = neighbor - g_rangel; // Compute local index of neighbor
(&(p->dx))[axis] = -v0; // Convert coordinate system
}
return 0; // Return "mover not in use"
}
#define timer_start(elt) \
int timer_##elt = clock();
#define timer_end(elt) \
timer_##elt = clock() - timer_##elt; \
if (threadIdx.x == 0) printf(#elt ": %d\n", timer_##elt);
// Total __shared__ size = 11298 * sizeof(int)
// #define BLOCK_SIZE 512
#define SHARE_MAX_VOXEL_SIZE 2 // 18
#define SHARE_MAX_PM_SIZE 10 // > (1024 * 0.2%) // 10 * 4
__inline__ __device__ int warpAllReduceSum(int val) {
for (int mask = warpSize / 2; mask > 0; mask /= 2)
val += __shfl_xor(val, mask);
return val;
}
__global__ void advance_p_gpu(advance_p_gpu_args args) {
const int block_rank = blockIdx.x;
const int n_block = gridDim.x;
const int thread_rank = threadIdx.x;
const int n_thread = blockDim.x;
const int block_size = args.block_size;
const float qdt_2mc = args.qdt_2mc;
const float cdt_dx = args.cdt_dx;
const float cdt_dy = args.cdt_dy;
const float cdt_dz = args.cdt_dz;
const float qsp = args.qsp;
const float one = 1.0;
const float one_third = 1.0 / 3.0;
const float two_fifteenths = 2.0 / 15.0;
float dx, dy, dz, ux, uy, uz, q;
float hax, hay, haz, cbx, cby, cbz;
float v0, v1, v2, v3, v4, v5;
float a[SHARE_MAX_VOXEL_SIZE][12] = {0};
int itmp, n, nm, max_nm;
particle_mover_t local_pm;
__shared__ int f_shared_index[2];
interpolator_t f_shared[2]; // assume only 2 kinds of voxels
GPU_DISTRIBUTE(args.np, block_size, block_rank, itmp, n);
particle_t *p_global = args.p0 + itmp;
const interpolator_t *f_global = args.f0;
// if (thread_rank == 0) {
// particle_t p = p_global[0];
// f_shared_index[0] = p.i;
// f_shared[0] = f_global[p.i];
// }
// if (thread_rank == 1) {
// particle_t p = p_global[n - 1];
// f_shared_index[1] = p.i;
// f_shared[1] = f_global[p.i];
// }
__syncthreads();
int ii = 0;
interpolator_t f;
int cnt = 0;
for (int i = 0; i < n; i += n_thread) {
if (thread_rank < n) {
particle_t p = p_global[i + thread_rank];
interpolator_t f = f_global[p.i];
// if (f_shared_index[ii] != p.i){
// cnt ++;
// if(cnt > 2)printf("%d\n", cnt);
// while(ii < SHARE_MAX_VOXEL_SIZE && f_shared_index[ii] != p.i) ii++;
// f = f_shared[ii];
// }
dx = p.dx; // Load position
dy = p.dy;
dz = p.dz;
hax = qdt_2mc *
((f.ex + dy * f.dexdy) + dz * (f.dexdz + dy * f.d2exdydz));
hay = qdt_2mc *
((f.ey + dz * f.deydz) + dx * (f.deydx + dz * f.d2eydzdx));
haz = qdt_2mc *
((f.ez + dx * f.dezdx) + dy * (f.dezdy + dx * f.d2ezdxdy));
cbx = f.cbx + dx * f.dcbxdx; // Interpolate B
cby = f.cby + dy * f.dcbydy;
cbz = f.cbz + dz * f.dcbzdz;
ux = p.ux; // Load momentum
uy = p.uy;
uz = p.uz;
q = p.w;
ux += hax; // Half advance E
uy += hay;
uz += haz;
v0 = qdt_2mc /
sqrtf(one + (ux * ux + (uy * uy + uz * uz)));
// Boris - scalars
v1 = cbx * cbx + (cby * cby + cbz * cbz);
v2 = (v0 * v0) * v1;
v3 = v0 * (one + v2 * (one_third + v2 * two_fifteenths));
v4 = v3 / (one + v1 * (v3 * v3));
v4 += v4;
v0 = ux + v3 * (uy * cbz - uz * cby); // Boris - uprime
v1 = uy + v3 * (uz * cbx - ux * cbz);
v2 = uz + v3 * (ux * cby - uy * cbx);
ux += v4 * (v1 * cbz - v2 * cby); // Boris - rotation
uy += v4 * (v2 * cbx - v0 * cbz);
uz += v4 * (v0 * cby - v1 * cbx);
ux += hax; // Half advance E
uy += hay;
uz += haz;
p.ux = ux; // Store momentum
p.uy = uy;
p.uz = uz;
v0 = one / sqrtf(one + (ux * ux + (uy * uy + uz * uz)));
// Get norm displacement
ux *= cdt_dx;
uy *= cdt_dy;
uz *= cdt_dz;
ux *= v0;
uy *= v0;
uz *= v0;
v0 = dx + ux; // Streak midpoint (inbnds)
v1 = dy + uy;
v2 = dz + uz;
v3 = v0 + ux; // New position
v4 = v1 + uy;
v5 = v2 + uz;
// FIXME-KJB: COULD SHORT CIRCUIT ACCUMULATION IN THE CASE WHERE QSP==0!
if (v3 <= one && v4 <= one && v5 <= one && // Check if inbnds
-v3 <= one && -v4 <= one && -v5 <= one) {
q *= qsp;
p.dx = v3; // Store new position
p.dy = v4;
p.dz = v5;
dx = v0; // Streak midpoint
dy = v1;
dz = v2;
v5 = q * ux * uy * uz * one_third; // Compute correction
#define ACCUMULATE_J(X, Y, Z, offset) \
v4 = q * u##X; /* v2 = q ux */ \
v1 = v4 * d##Y; /* v1 = q ux dy */ \
v0 = v4 - v1; /* v0 = q ux (1-dy) */ \
v1 += v4; /* v1 = q ux (1+dy) */ \
v4 = one + d##Z; /* v4 = 1+dz */ \
v2 = v0 * v4; /* v2 = q ux (1-dy)(1+dz) */ \
v3 = v1 * v4; /* v3 = q ux (1+dy)(1+dz) */ \
v4 = one - d##Z; /* v4 = 1-dz */ \
v0 *= v4; /* v0 = q ux (1-dy)(1-dz) */ \
v1 *= v4; /* v1 = q ux (1+dy)(1-dz) */ \
v0 += v5; /* v0 = q ux [ (1-dy)(1-dz) + uy*uz/3 ] */ \
v1 -= v5; /* v1 = q ux [ (1+dy)(1-dz) - uy*uz/3 ] */ \
v2 -= v5; /* v2 = q ux [ (1-dy)(1+dz) - uy*uz/3 ] */ \
v3 += v5; /* v3 = q ux [ (1+dy)(1+dz) + uy*uz/3 ] */ \
a[ii][offset + 0] = v0; \
a[ii][offset + 1] = v1; \
a[ii][offset + 2] = v2; \
a[ii][offset + 3] = v3;
// timer_start(save_to_local_a); // 9920
ACCUMULATE_J(x, y, z, 0);
ACCUMULATE_J(y, z, x, 4);
ACCUMULATE_J(z, x, y, 8);
// timer_end(save_to_local_a);
#undef ACCUMULATE_J
}
else {
local_pm.dispx = ux;
local_pm.dispy = uy;
local_pm.dispz = uz;
local_pm.i = itmp + i + thread_rank;
// __device__ int move_p_gpu(particle_t *p0, particle_mover_t *pm,
// accumulator_t *a0, const int64_t *g_neighbor,
// int64_t g_rangel, int64_t g_rangeh, const float qsp) {
/*if (move_p_gpu(p0, &local_pm, a0, g, qsp)) // Unlikely
{
if (nm < max_nm) {
pm[nm++] = local_pm[0];
}
else {
itmp++; // Unlikely
}
}*/
}
p_global[i + thread_rank] = p;
}
}
__syncthreads();
typedef cub::WarpReduce<float> WarpReduce;
__shared__ typename WarpReduce::TempStorage temp_storage;
float my_a[SHARE_MAX_VOXEL_SIZE][12];
__shared__ float res[SHARE_MAX_VOXEL_SIZE][12];
#pragma unroll
for (int i = 0; i < SHARE_MAX_VOXEL_SIZE; i++) {
#pragma unroll
for (int j = 0; j < 12; j++) {
float aggregate = WarpReduce(temp_storage).Sum(my_a[i][j]);
if (thread_rank == 0) res[i][j] = aggregate;
__syncthreads();
}
}
accumulator_t *a_global = args.a0;
for (int i = 0; i < SHARE_MAX_VOXEL_SIZE; i++) {
if (thread_rank < 12) {
float real_a = res[i][thread_rank];
atomicAdd(((float *)a_global + f_shared_index[i]) + thread_rank,
real_a);
}
}
/* int my_a[SHARE_MAX_VOXEL_SIZE][12];
__shared__ int res[SHARE_MAX_VOXEL_SIZE][12];
const float two_p_28 = (float)(1<<28);
#pragma unroll
for(int i = 0;i < SHARE_MAX_VOXEL_SIZE; i++){
#pragma unroll
for(int j = 0;j < 12;j++){
my_a[i][j] = a[i][j] * two_p_28;
}
}
#pragma unroll
for(int i = 0;i < SHARE_MAX_VOXEL_SIZE; i++){
#pragma unroll
for(int j = 0;j < 12;j++){
// float aggregate = WarpReduce(temp_storage).Sum(my_a[i][j]);
int aggregate = warpAllReduceSum(my_a[i][j]);
if(thread_rank == 0) res[i][j] = aggregate;
__syncthreads();
}
}
accumulator_t *a_global = args.a0;
for(int i = 0;i < SHARE_MAX_VOXEL_SIZE; i++){
if (thread_rank < 12) {
float real_a = (float)(res[i][thread_rank] >> 28);
atomicAdd( ((float *)a_global + f_shared_index[i]) + thread_rank ,
real_a );
}
}*/
}
#endif // __useless
|
15ff1e4155243990344c624b7c6d49b7087fba7b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <unordered_map>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
class bbcuLocalHeap
{
protected:
struct heap_t
{
void *ptr;
size_t size;
heap_t(){}
heap_t(void *p, size_t sz) { ptr = p; size = sz; }
bool operator<(const heap_t &h) const
{
return size < h.size;
}
};
std::unordered_map<void*, size_t> m_allocated_map;
size_t m_allocated_size = 0;
size_t m_max_alloc_size = 0;
std::vector<heap_t> m_reserve_vec;
size_t m_reserve_size = 0;
public:
// constructor
bbcuLocalHeap() {}
// destructor
~bbcuLocalHeap()
{
BBCU_ASSERT(m_allocated_map.empty());
BBCU_ASSERT(m_allocated_size == 0);
for (auto& heap : m_reserve_vec) {
m_reserve_size -= heap.size;
// BB_CUDA_SAFE_CALL(hipFree(heap.ptr));
auto err = hipFree(heap.ptr);
if ( err == 4 ) { return; } // driver shutting down
if ( err == 29 ) { return; } // driver shutting down
if (err != hipSuccess) {
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", hipGetErrorString(err), err, __FILE__, __LINE__);
}
}
BBCU_ASSERT(m_reserve_size == 0);
}
protected:
// Free one unused reserved block, if any
bool FreeGarbage(void)
{
if (m_reserve_vec.empty()) {
return false;
}
auto it = m_reserve_vec.begin();
m_reserve_size -= it->size;
BB_CUDA_SAFE_CALL(hipFree(it->ptr));
m_reserve_vec.erase(it);
return true;
}
public:
void* Malloc(size_t size)
{
// Reuse a reserved block of suitable size if one exists
for ( auto it = m_reserve_vec.begin(); it != m_reserve_vec.end(); ++it ) {
if ( it->size >= size && it->size < (size * 3 / 2) ) {
// Take it out of the reserve list
auto h = *it;
m_reserve_vec.erase(it);
m_reserve_size -= h.size;
//
BBCU_ASSERT(m_allocated_map.count(h.ptr) == 0);
m_allocated_map[h.ptr] = h.size;
m_allocated_size += h.size;
m_max_alloc_size = std::max(m_max_alloc_size, m_allocated_size);
return h.ptr;
}
}
// No suitably sized reserved block, so allocate a new one
// Add the size first so the garbage freeing below kicks in
m_allocated_size += size;
m_max_alloc_size = std::max(m_max_alloc_size, m_allocated_size);
while ((m_allocated_size + m_reserve_size) > (m_max_alloc_size * 3 / 2) ) {
FreeGarbage();
}
// Allocate fresh device memory
do {
void *ptr;
hipError_t err = hipMalloc(&ptr, size);
if (err == hipSuccess) {
// Register the new allocation
BBCU_ASSERT(m_allocated_map.count(ptr) == 0);
m_allocated_map[ptr] = size;
return ptr;
}
} while ( FreeGarbage() );
m_allocated_size -= size;
BBCU_ASSERT(0);
return nullptr;
}
void Free(void* ptr)
{
BBCU_ASSERT(m_allocated_map.count(ptr) == 1);
size_t size = m_allocated_map[ptr];
m_allocated_map.erase(ptr);
m_allocated_size -= size;
m_reserve_vec.push_back(heap_t(ptr, size));
m_reserve_size += size;
std::sort(m_reserve_vec.begin(), m_reserve_vec.end());
}
size_t GetMaxAllocSize(void)
{
return m_max_alloc_size;
}
};
static bbcuLocalHeap g_bbcu_local_heap;
BBCU_DLL_EXPORT void* bbcu_LocalHeap_Malloc(size_t size)
{
return g_bbcu_local_heap.Malloc(size);
}
BBCU_DLL_EXPORT void bbcu_LocalHeap_Free(void* ptr)
{
g_bbcu_local_heap.Free(ptr);
}
BBCU_DLL_EXPORT size_t bbcu_LocalHeap_GetMaxAllocSize(void)
{
return g_bbcu_local_heap.GetMaxAllocSize();
}
// end of file
|
15ff1e4155243990344c624b7c6d49b7087fba7b.cu
|
#include <iostream>
#include <vector>
#include <unordered_map>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
class bbcuLocalHeap
{
protected:
struct heap_t
{
void *ptr;
size_t size;
heap_t(){}
heap_t(void *p, size_t sz) { ptr = p; size = sz; }
bool operator<(const heap_t &h) const
{
return size < h.size;
}
};
std::unordered_map<void*, size_t> m_allocated_map;
size_t m_allocated_size = 0;
size_t m_max_alloc_size = 0;
std::vector<heap_t> m_reserve_vec;
size_t m_reserve_size = 0;
public:
// constructor
bbcuLocalHeap() {}
// destructor
~bbcuLocalHeap()
{
BBCU_ASSERT(m_allocated_map.empty());
BBCU_ASSERT(m_allocated_size == 0);
for (auto& heap : m_reserve_vec) {
m_reserve_size -= heap.size;
// BB_CUDA_SAFE_CALL(cudaFree(heap.ptr));
auto err = cudaFree(heap.ptr);
if ( err == 4 ) { return; } // driver shutting down
if ( err == 29 ) { return; } // driver shutting down
if (err != cudaSuccess) {
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", cudaGetErrorString(err), err, __FILE__, __LINE__);
}
}
BBCU_ASSERT(m_reserve_size == 0);
}
protected:
// Free one unused reserved block, if any
bool FreeGarbage(void)
{
if (m_reserve_vec.empty()) {
return false;
}
auto it = m_reserve_vec.begin();
m_reserve_size -= it->size;
BB_CUDA_SAFE_CALL(cudaFree(it->ptr));
m_reserve_vec.erase(it);
return true;
}
public:
void* Malloc(size_t size)
{
// Reuse a reserved block of suitable size if one exists
for ( auto it = m_reserve_vec.begin(); it != m_reserve_vec.end(); ++it ) {
if ( it->size >= size && it->size < (size * 3 / 2) ) {
// Take it out of the reserve list
auto h = *it;
m_reserve_vec.erase(it);
m_reserve_size -= h.size;
// Record it as allocated
BBCU_ASSERT(m_allocated_map.count(h.ptr) == 0);
m_allocated_map[h.ptr] = h.size;
m_allocated_size += h.size;
m_max_alloc_size = std::max(m_max_alloc_size, m_allocated_size);
return h.ptr;
}
}
// No suitably sized reserved block, so allocate a new one
// Add the size first so the garbage freeing below kicks in
m_allocated_size += size;
m_max_alloc_size = std::max(m_max_alloc_size, m_allocated_size);
while ((m_allocated_size + m_reserve_size) > (m_max_alloc_size * 3 / 2) ) {
FreeGarbage();
}
// Allocate fresh device memory
do {
void *ptr;
cudaError_t err = cudaMalloc(&ptr, size);
if (err == cudaSuccess) {
// Register the new allocation
BBCU_ASSERT(m_allocated_map.count(ptr) == 0);
m_allocated_map[ptr] = size;
return ptr;
}
} while ( FreeGarbage() );
m_allocated_size -= size;
BBCU_ASSERT(0);
return nullptr;
}
void Free(void* ptr)
{
BBCU_ASSERT(m_allocated_map.count(ptr) == 1);
size_t size = m_allocated_map[ptr];
m_allocated_map.erase(ptr);
m_allocated_size -= size;
m_reserve_vec.push_back(heap_t(ptr, size));
m_reserve_size += size;
std::sort(m_reserve_vec.begin(), m_reserve_vec.end());
}
size_t GetMaxAllocSize(void)
{
return m_max_alloc_size;
}
};
static bbcuLocalHeap g_bbcu_local_heap;
BBCU_DLL_EXPORT void* bbcu_LocalHeap_Malloc(size_t size)
{
return g_bbcu_local_heap.Malloc(size);
}
BBCU_DLL_EXPORT void bbcu_LocalHeap_Free(void* ptr)
{
g_bbcu_local_heap.Free(ptr);
}
BBCU_DLL_EXPORT size_t bbcu_LocalHeap_GetMaxAllocSize(void)
{
return g_bbcu_local_heap.GetMaxAllocSize();
}
// end of file
|
0c8aaafc65910ec4512c699bd687320a4c0971c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <ctime>
#include "nvparse.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#include <windows.h>
#else
#include <unistd.h>
#endif
int main() {
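// Minimal Thrust smoke test: fill a device_vector, copy it to the host and print it; the nvparse benchmark below is commented out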
thrust::device_vector<int> d(3);
thrust::host_vector<int> h(3);
d[0] = 1;
d[1] = 2;
d[2] = 3;
thrust::copy(d.begin(), d.end(), h.begin());
for(int i = 0; i < 3; i++)
std::cout << h[i] << std::endl;
/*FILE* f = fopen("lineitem.tbl", "r" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
thrust::device_vector<char> dev(fileSize);
fseek(f, 0, SEEK_SET);
char* buff;
hipHostMalloc((void**) &buff, fileSize,hipHostMallocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
thrust::copy(buff, buff+fileSize, dev.begin());
hipHostFree(buff);
auto cnt = thrust::count(dev.begin(), dev.end(), '\n');
std::cout << "There are " << cnt << " total lines in a file" << std::endl;
thrust::device_vector<int> dev_pos(cnt+1);
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)fileSize),
dev.begin(), dev_pos.begin()+1, is_break());
thrust::device_vector<char> dev_res1(cnt*15);
thrust::fill(dev_res1.begin(), dev_res1.end(), 0);
thrust::device_vector<char> dev_res2(cnt*15);
thrust::fill(dev_res2.begin(), dev_res2.end(), 0);
thrust::device_vector<char> dev_res3(cnt*15);
thrust::fill(dev_res3.begin(), dev_res3.end(), 0);
thrust::device_vector<char> dev_res4(cnt*15);
thrust::fill(dev_res4.begin(), dev_res4.end(), 0);
thrust::device_vector<char> dev_res5(cnt*15);
thrust::fill(dev_res5.begin(), dev_res5.end(), 0);
thrust::device_vector<char> dev_res6(cnt*15);
thrust::fill(dev_res6.begin(), dev_res6.end(), 0);
thrust::device_vector<char> dev_res7(cnt*15);
thrust::fill(dev_res7.begin(), dev_res7.end(), 0);
thrust::device_vector<char> dev_res8(cnt*15);
thrust::fill(dev_res8.begin(), dev_res8.end(), 0);
thrust::device_vector<char> dev_res9(cnt);
thrust::fill(dev_res9.begin(), dev_res9.end(), 0);
thrust::device_vector<char> dev_res10(cnt);
thrust::fill(dev_res10.begin(), dev_res10.end(), 0);
thrust::device_vector<char> dev_res11(cnt*10);
thrust::fill(dev_res11.begin(), dev_res11.end(), 0);
thrust::device_vector<char*> dest(11);
dest[0] = thrust::raw_pointer_cast(dev_res1.data());
dest[1] = thrust::raw_pointer_cast(dev_res2.data());
dest[2] = thrust::raw_pointer_cast(dev_res3.data());
dest[3] = thrust::raw_pointer_cast(dev_res4.data());
dest[4] = thrust::raw_pointer_cast(dev_res5.data());
dest[5] = thrust::raw_pointer_cast(dev_res6.data());
dest[6] = thrust::raw_pointer_cast(dev_res7.data());
dest[7] = thrust::raw_pointer_cast(dev_res8.data());
dest[8] = thrust::raw_pointer_cast(dev_res9.data());
dest[9] = thrust::raw_pointer_cast(dev_res10.data());
dest[10] = thrust::raw_pointer_cast(dev_res11.data());
thrust::device_vector<unsigned int> ind(11); //fields positions
ind[0] = 0;
ind[1] = 1;
ind[2] = 2;
ind[3] = 3;
ind[4] = 4;
ind[5] = 5;
ind[6] = 6;
ind[7] = 7;
ind[8] = 8;
ind[9] = 9;
ind[10] = 10;
thrust::device_vector<unsigned int> dest_len(11); //fields max lengths
dest_len[0] = 15;
dest_len[1] = 15;
dest_len[2] = 15;
dest_len[3] = 15;
dest_len[4] = 15;
dest_len[5] = 15;
dest_len[6] = 15;
dest_len[7] = 15;
dest_len[8] = 1;
dest_len[9] = 1;
dest_len[10] = 10;
thrust::device_vector<unsigned int> ind_cnt(1); //fields count
ind_cnt[0] = 10;
thrust::device_vector<char> sep(1);
sep[0] = '|';
std::clock_t start1 = std::clock();
thrust::counting_iterator<unsigned int> begin(0);
parse_functor ff((const char*)thrust::raw_pointer_cast(dev.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sep.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + cnt, ff); // now dev_pos vector contains the indexes of new line characters
thrust::device_vector<long long int> d_int(cnt);
thrust::device_vector<double> d_float(cnt);
std::cout<< "time0 " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << '\n';
//check the text results in dev_res array :
for(int i = 0; i < 100; i++)
std::cout << dev_res9[i];
std ::cout << std::endl;
for(int i = 0; i < 100; i++)
std::cout << dev_res10[i];
std ::cout << std::endl;
//binary integer results
ind_cnt[0] = 15;
gpu_atoll atoll_ff((const char*)thrust::raw_pointer_cast(dev_res3.data()),(long long int*)thrust::raw_pointer_cast(d_int.data()),
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + cnt, atoll_ff);
for(int i = 0; i < 10; i++)
std::cout << d_int[i] << std::endl;
std::cout << std::endl;
//binary float results
gpu_atof atof_ff((const char*)thrust::raw_pointer_cast(dev_res6.data()),(double*)thrust::raw_pointer_cast(d_float.data()),
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + cnt, atof_ff);
std::cout.precision(10);
for(int i = 0; i < 10; i++)
std::cout << d_int[i] << std::endl;
*/
return 0;
}
|
0c8aaafc65910ec4512c699bd687320a4c0971c9.cu
|
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <ctime>
#include "nvparse.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#include <windows.h>
#else
#include <unistd.h>
#endif
int main() {
thrust::device_vector<int> d(3);
thrust::host_vector<int> h(3);
d[0] = 1;
d[1] = 2;
d[2] = 3;
thrust::copy(d.begin(), d.end(), h.begin());
for(int i = 0; i < 3; i++)
std::cout << h[i] << std::endl;
/*FILE* f = fopen("lineitem.tbl", "r" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
thrust::device_vector<char> dev(fileSize);
fseek(f, 0, SEEK_SET);
char* buff;
cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
thrust::copy(buff, buff+fileSize, dev.begin());
cudaFreeHost(buff);
auto cnt = thrust::count(dev.begin(), dev.end(), '\n');
std::cout << "There are " << cnt << " total lines in a file" << std::endl;
thrust::device_vector<int> dev_pos(cnt+1);
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)fileSize),
dev.begin(), dev_pos.begin()+1, is_break());
thrust::device_vector<char> dev_res1(cnt*15);
thrust::fill(dev_res1.begin(), dev_res1.end(), 0);
thrust::device_vector<char> dev_res2(cnt*15);
thrust::fill(dev_res2.begin(), dev_res2.end(), 0);
thrust::device_vector<char> dev_res3(cnt*15);
thrust::fill(dev_res3.begin(), dev_res3.end(), 0);
thrust::device_vector<char> dev_res4(cnt*15);
thrust::fill(dev_res4.begin(), dev_res4.end(), 0);
thrust::device_vector<char> dev_res5(cnt*15);
thrust::fill(dev_res5.begin(), dev_res5.end(), 0);
thrust::device_vector<char> dev_res6(cnt*15);
thrust::fill(dev_res6.begin(), dev_res6.end(), 0);
thrust::device_vector<char> dev_res7(cnt*15);
thrust::fill(dev_res7.begin(), dev_res7.end(), 0);
thrust::device_vector<char> dev_res8(cnt*15);
thrust::fill(dev_res8.begin(), dev_res8.end(), 0);
thrust::device_vector<char> dev_res9(cnt);
thrust::fill(dev_res9.begin(), dev_res9.end(), 0);
thrust::device_vector<char> dev_res10(cnt);
thrust::fill(dev_res10.begin(), dev_res10.end(), 0);
thrust::device_vector<char> dev_res11(cnt*10);
thrust::fill(dev_res11.begin(), dev_res11.end(), 0);
thrust::device_vector<char*> dest(11);
dest[0] = thrust::raw_pointer_cast(dev_res1.data());
dest[1] = thrust::raw_pointer_cast(dev_res2.data());
dest[2] = thrust::raw_pointer_cast(dev_res3.data());
dest[3] = thrust::raw_pointer_cast(dev_res4.data());
dest[4] = thrust::raw_pointer_cast(dev_res5.data());
dest[5] = thrust::raw_pointer_cast(dev_res6.data());
dest[6] = thrust::raw_pointer_cast(dev_res7.data());
dest[7] = thrust::raw_pointer_cast(dev_res8.data());
dest[8] = thrust::raw_pointer_cast(dev_res9.data());
dest[9] = thrust::raw_pointer_cast(dev_res10.data());
dest[10] = thrust::raw_pointer_cast(dev_res11.data());
thrust::device_vector<unsigned int> ind(11); //fields positions
ind[0] = 0;
ind[1] = 1;
ind[2] = 2;
ind[3] = 3;
ind[4] = 4;
ind[5] = 5;
ind[6] = 6;
ind[7] = 7;
ind[8] = 8;
ind[9] = 9;
ind[10] = 10;
thrust::device_vector<unsigned int> dest_len(11); //fields max lengths
dest_len[0] = 15;
dest_len[1] = 15;
dest_len[2] = 15;
dest_len[3] = 15;
dest_len[4] = 15;
dest_len[5] = 15;
dest_len[6] = 15;
dest_len[7] = 15;
dest_len[8] = 1;
dest_len[9] = 1;
dest_len[10] = 10;
thrust::device_vector<unsigned int> ind_cnt(1); //fields count
ind_cnt[0] = 10;
thrust::device_vector<char> sep(1);
sep[0] = '|';
std::clock_t start1 = std::clock();
thrust::counting_iterator<unsigned int> begin(0);
parse_functor ff((const char*)thrust::raw_pointer_cast(dev.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sep.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + cnt, ff); // now dev_pos vector contains the indexes of new line characters
thrust::device_vector<long long int> d_int(cnt);
thrust::device_vector<double> d_float(cnt);
std::cout<< "time0 " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << '\n';
//check the text results in dev_res array :
for(int i = 0; i < 100; i++)
std::cout << dev_res9[i];
std ::cout << std::endl;
for(int i = 0; i < 100; i++)
std::cout << dev_res10[i];
std ::cout << std::endl;
//binary integer results
ind_cnt[0] = 15;
gpu_atoll atoll_ff((const char*)thrust::raw_pointer_cast(dev_res3.data()),(long long int*)thrust::raw_pointer_cast(d_int.data()),
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + cnt, atoll_ff);
for(int i = 0; i < 10; i++)
std::cout << d_int[i] << std::endl;
std::cout << std::endl;
//binary float results
gpu_atof atof_ff((const char*)thrust::raw_pointer_cast(dev_res6.data()),(double*)thrust::raw_pointer_cast(d_float.data()),
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + cnt, atof_ff);
std::cout.precision(10);
for(int i = 0; i < 10; i++)
std::cout << d_int[i] << std::endl;
*/
return 0;
}
|
583a88af4325bcb01917b5ce5fa39211aed9504e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void res_gpu( const double *A, const double *u, double *du,
const double *beta) {
*du += (*beta) * (*A) * (*u);
}
// CUDA kernel function
__global__ void op_cuda_res(
const double *__restrict ind_arg0,
double *__restrict ind_arg1,
const int *__restrict opDat1Map,
const double *__restrict arg0,
const double *arg3,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg2_l[1];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<1; d++ ){
arg2_l[d] = ZERO_double;
}
map1idx = opDat1Map[n + offset_b + set_size * 1];
map2idx = opDat1Map[n + offset_b + set_size * 0];
//user-supplied kernel call
res_gpu(arg0+(n+offset_b)*1,
ind_arg0+map1idx*1,
arg2_l,
arg3);
col2 = colors[n+offset_b];
}
//store local variables
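// Colours serialise the indirect updates: threads sharing a map2idx get different colours, and the
// __syncthreads between colours prevents races on ind_arg1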
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg2_l[0] += ind_arg1[0+map2idx*1];
ind_arg1[0+map2idx*1] = arg2_l[0];
}
__syncthreads();
}
}
}
//host stub function
void op_par_loop_res(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3){
double*arg3h = (double *)arg3.data;
int nargs = 4;
op_arg args[4];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
int ninds = 2;
int inds[4] = {-1,0,1,-1};
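// inds[] gives, per argument, the index of the indirectly accessed dataset (-1 = directly accessed)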
if (OP_diags>2) {
printf(" kernel routine with indirection: res\n");
}
//get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(double));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((double *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(double));
mvConstArraysToDevice(consts_bytes);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_res), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg1.data_d,
(double *)arg2.data_d,
arg1.map_data_d,
(double*)arg0.data_d,
(double*)arg3.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
}
|
583a88af4325bcb01917b5ce5fa39211aed9504e.cu
|
//
// auto-generated by op2.py
//
//user function
__device__ void res_gpu( const double *A, const double *u, double *du,
const double *beta) {
*du += (*beta) * (*A) * (*u);
}
// CUDA kernel function
__global__ void op_cuda_res(
const double *__restrict ind_arg0,
double *__restrict ind_arg1,
const int *__restrict opDat1Map,
const double *__restrict arg0,
const double *arg3,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg2_l[1];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<1; d++ ){
arg2_l[d] = ZERO_double;
}
map1idx = opDat1Map[n + offset_b + set_size * 1];
map2idx = opDat1Map[n + offset_b + set_size * 0];
//user-supplied kernel call
res_gpu(arg0+(n+offset_b)*1,
ind_arg0+map1idx*1,
arg2_l,
arg3);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg2_l[0] += ind_arg1[0+map2idx*1];
ind_arg1[0+map2idx*1] = arg2_l[0];
}
__syncthreads();
}
}
}
//host stub function
void op_par_loop_res(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3){
double*arg3h = (double *)arg3.data;
int nargs = 4;
op_arg args[4];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
int ninds = 2;
int inds[4] = {-1,0,1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: res\n");
}
//get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(double));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((double *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(double));
mvConstArraysToDevice(consts_bytes);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
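// Split a colour's blocks over a 2D grid when it needs more than 65535 of them, since gridDim.x is capped on older devices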
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_res<<<nblocks,nthread>>>(
(double *)arg1.data_d,
(double *)arg2.data_d,
arg1.map_data_d,
(double*)arg0.data_d,
(double*)arg3.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
}
|
2683a3f1745de36af9e04d1ca3904547b6c89902.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/FunctionOfAMatrixUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
namespace at { namespace native {
namespace {
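// Each block covers n_threads * n_elems_per_thread consecutive indices; every thread
// applies f to n_elems_per_thread of them, strided n_threads apart.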
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _elemwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
void _lauch_kernel(int total_n_elems, const func_t& f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( _elemwise_kernel<n_threads, n_elems_per_thread, func_t>)
, dim3(grid), dim3(block), 0, stream, total_n_elems, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _compute_linear_combination_internal_kernel(
TensorIterator& iter,
int32_t in_stride,
int32_t coeff_stride,
int32_t num_summations
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_compute_linear_combination_internal_kernel<scalar_t>(
sub_iter, in_stride, coeff_stride, num_summations
);
}
return;
}
auto offset_calc = make_offset_calculator<3>(iter);
char* __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ coeff_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto loop = [=]C10_DEVICE(int idx) {
auto offsets = offset_calc.get(idx);
auto* __restrict__ out_data = reinterpret_cast<scalar_t*>(
out_ptr + offsets[0]
);
auto* __restrict__ in_data = reinterpret_cast<scalar_t*>(
in_ptr + offsets[1]
);
using primitive_t = typename scalar_value_type<scalar_t>::type;
auto* __restrict__ coeff_data = reinterpret_cast<primitive_t*>(
coeff_ptr + offsets[2]
);
// perform summation
for (int32_t i = 0; i < num_summations; ++i) {
*out_data += in_data[i * in_stride] * coeff_data[i * coeff_stride];
}
};
_lauch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
void _compute_linear_combination_cuda_kernel(
TensorIterator& iter,
int64_t in_stride,
int64_t coeff_stride,
int64_t num_summations
) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"_compute_linear_combination_cuda", [&] () {
_compute_linear_combination_internal_kernel<scalar_t>(
iter, in_stride, coeff_stride, num_summations
);
}
);
}
}
REGISTER_DISPATCH(_compute_linear_combination_stub, &_compute_linear_combination_cuda_kernel);
}} // namespace at::native
|
2683a3f1745de36af9e04d1ca3904547b6c89902.cu
|
#include <ATen/native/FunctionOfAMatrixUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
namespace at { namespace native {
namespace {
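// Each block covers n_threads * n_elems_per_thread consecutive indices; every thread
// applies f to n_elems_per_thread of them, strided n_threads apart.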
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _elemwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
void _lauch_kernel(int total_n_elems, const func_t& f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::cuda::getCurrentCUDAStream();
_elemwise_kernel<n_threads, n_elems_per_thread, func_t>
<<<grid, block, 0, stream>>>(total_n_elems, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _compute_linear_combination_internal_kernel(
TensorIterator& iter,
int32_t in_stride,
int32_t coeff_stride,
int32_t num_summations
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_compute_linear_combination_internal_kernel<scalar_t>(
sub_iter, in_stride, coeff_stride, num_summations
);
}
return;
}
auto offset_calc = make_offset_calculator<3>(iter);
char* __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ coeff_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto loop = [=]C10_DEVICE(int idx) {
auto offsets = offset_calc.get(idx);
auto* __restrict__ out_data = reinterpret_cast<scalar_t*>(
out_ptr + offsets[0]
);
auto* __restrict__ in_data = reinterpret_cast<scalar_t*>(
in_ptr + offsets[1]
);
using primitive_t = typename scalar_value_type<scalar_t>::type;
auto* __restrict__ coeff_data = reinterpret_cast<primitive_t*>(
coeff_ptr + offsets[2]
);
// perform summation
for (int32_t i = 0; i < num_summations; ++i) {
*out_data += in_data[i * in_stride] * coeff_data[i * coeff_stride];
}
};
_lauch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
void _compute_linear_combination_cuda_kernel(
TensorIterator& iter,
int64_t in_stride,
int64_t coeff_stride,
int64_t num_summations
) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"_compute_linear_combination_cuda", [&] () {
_compute_linear_combination_internal_kernel<scalar_t>(
iter, in_stride, coeff_stride, num_summations
);
}
);
}
}
REGISTER_DISPATCH(_compute_linear_combination_stub, &_compute_linear_combination_cuda_kernel);
}} // namespace at::native
|
57588ca2bebf6d4ea33ae8e8ad228eea0f968619.hip
|
// !!! This is a file automatically generated by hipify!!!
// Blelloch scan example for parallel programming reading course
//
// Author: Frederik Andersen
//
// Setup of code and comment is modified from CUDA example addWithCuda
// Scan kernel is taken from https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html
//
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define COMPUTE_BIN(n) ((n) & 0x01) // bin by the lowest bit: even values go to bin 0, odd values to bin 1
hipError_t histogram(int *h_in, int *h_out, unsigned int bins, unsigned int size);
void createArray(const int size, int* array);
void serialVersion(int* h_in, int* h_out, unsigned int bins, unsigned int size);
bool checkResults(int* array1, int* array2, unsigned int size);
// Naive histogram kernel: one thread per input element, atomicAdd into its bin
__global__ void naive_histo_kernel(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x;
int myItem = d_in[myId];
int myBin = COMPUTE_BIN(myItem);
atomicAdd(&(d_bins[myBin]), 1);
}
int main()
{
const int arraySize = 256;
const int bins = 2;
int h_in[arraySize] = { 0 };
int h_out[bins] = { 0 };
int serialResult[bins] = { 0 };
createArray(arraySize, h_in);
// Histogram in serial.
serialVersion(h_in, serialResult, bins, arraySize);
// Histogram in parallel.
hipError_t cudaStatus = histogram(h_in, h_out, bins, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("Histogram of %d elements with %d bins\n", arraySize, bins);
if (checkResults(h_out, serialResult, bins))
printf("Test Passed\n");
else
printf("Test Failed\n");
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function that computes the histogram on the GPU.
hipError_t histogram(int *h_in, int *h_out, unsigned int bins, unsigned int size)
{
int *d_in = 0;
int *d_out = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the input array and the output bins.
cudaStatus = hipMalloc((void**)&d_in, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_out, bins * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(d_in, h_in, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_out, h_out, bins * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
naive_histo_kernel<<<1, size>>>(d_out, d_in, bins);
cudaStatus = hipDeviceSynchronize();
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "scan_kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching scan_kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(h_out, d_out, bins * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(d_out);
hipFree(d_in);
return cudaStatus;
}
void createArray(const int size, int* array)
{
for (int i = 0; i < size; i++)
{
array[i] = i;
}
}
void serialVersion(int* h_in, int* h_out, unsigned int bins, unsigned int size)
{
for (int i = 0; i < bins; i++)
{
h_out[i] = 0;
}
for (int i = 0; i < size; i++)
{
h_out[COMPUTE_BIN(h_in[i])]++;
}
}
bool checkResults(int* array1, int* array2, unsigned int size)
{
bool result = true;
for (unsigned int i = 0; i < size; i++)
{
if (array1[i] != array2[i])
{
result = false;
return result;
}
}
return result;
}
|
57588ca2bebf6d4ea33ae8e8ad228eea0f968619.cu
|
// Blelloch scan example for parallel programming reading course
//
// Author: Frederik Andersen
//
// Setup of code and comment is modified from CUDA example addWithCuda
// Scan kernel is taken from https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html
//
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define COMPUTE_BIN(n) ((n) & 0x01) // bin by the lowest bit: even values go to bin 0, odd values to bin 1
cudaError_t histogram(int *h_in, int *h_out, unsigned int bins, unsigned int size);
void createArray(const int size, int* array);
void serialVersion(int* h_in, int* h_out, unsigned int bins, unsigned int size);
bool checkResults(int* array1, int* array2, unsigned int size);
// Naive histogram kernel: one thread per input element, atomicAdd into its bin
__global__ void naive_histo_kernel(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x;
int myItem = d_in[myId];
int myBin = COMPUTE_BIN(myItem);
atomicAdd(&(d_bins[myBin]), 1);
}
int main()
{
const int arraySize = 256;
const int bins = 2;
int h_in[arraySize] = { 0 };
int h_out[bins] = { 0 };
int serialResult[bins] = { 0 };
createArray(arraySize, h_in);
// Histogram in serial.
serialVersion(h_in, serialResult, bins, arraySize);
// Histogram in parallel.
cudaError_t cudaStatus = histogram(h_in, h_out, bins, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("Histogram of %d elements with %d bins\n", arraySize, bins);
if (checkResults(h_out, serialResult, bins))
printf("Test Passed\n");
else
printf("Test Failed\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function that computes the histogram on the GPU.
cudaError_t histogram(int *h_in, int *h_out, unsigned int bins, unsigned int size)
{
int *d_in = 0;
int *d_out = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the input array and the output bins.
cudaStatus = cudaMalloc((void**)&d_in, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_out, bins * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(d_in, h_in, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_out, h_out, bins * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
naive_histo_kernel<<<1, size>>>(d_out, d_in, bins);
cudaStatus = cudaDeviceSynchronize();
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "scan_kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching scan_kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(h_out, d_out, bins * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(d_out);
cudaFree(d_in);
return cudaStatus;
}
void createArray(const int size, int* array)
{
for (int i = 0; i < size; i++)
{
array[i] = i;
}
}
void serialVersion(int* h_in, int* h_out, unsigned int bins, unsigned int size)
{
for (int i = 0; i < bins; i++)
{
h_out[i] = 0;
}
for (int i = 0; i < size; i++)
{
h_out[COMPUTE_BIN(h_in[i])]++;
}
}
bool checkResults(int* array1, int* array2, unsigned int size)
{
bool result = true;
for (unsigned int i = 0; i < size; i++)
{
if (array1[i] != array2[i])
{
result = false;
return result;
}
}
return result;
}
|
0b575e78ef8645c064198a84d84613905dba4b13.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
0b575e78ef8645c064198a84d84613905dba4b13.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
bba34244abf7e234330550b7762b09717f8009a3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#include <hip/hip_runtime.h>
#define PRECISION_z
__global__ void
magma_zget_row_ptr_kernel(
const magma_int_t num_rows,
magma_int_t* nnz,
const magma_index_t* __restrict__ rowidx,
magma_index_t* rowptr)
{
//int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
//int nnz;
/*magma_int_t nnz_per_row;
if(k<num_rows){
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
nnz_per_row = __ldg( rowidx + k );
#else
nnz_per_row = rowidx[k];
#endif
atomicAdd(&nnz,nnz_per_row);
}
if (k < 2)
{
if(k==1)
{
rowptr[0] = 0;
rowptr[1] = rowidx[0];
for(int iter=2;iter<(num_rows+1)/2;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
}
else{
rowptr[num_rows] = nnz;
for(int iter=num_rows-1;iter>(num_rows+1)/2;iter--){
rowptr[iter] = rowptr[iter+1]-rowidx[iter];
}
}
}
*/
//naive implementation for now.
if (k==1) {
rowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
nnz[0] = rowptr[num_rows];
}
} //kernel
extern "C" magma_int_t
magma_zget_row_ptr(
const magma_int_t num_rows,
magma_int_t *nnz,
const magma_index_t* rowidx,
magma_index_t* rowptr,
magma_queue_t queue)
{
/*
int blocksize = 128;
int gridsize = magma_ceildiv(num_rows, blocksize);
magma_int_t *nnz_dev, *tnnz;
magma_imalloc(&nnz_dev, 1);
magma_imalloc_cpu(&tnnz, 1);
dim3 block(blocksize,1,1);
dim3 grid(gridsize,1,1);
magma_zget_row_ptr_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(num_rows, nnz_dev, rowidx, rowptr);
magma_igetvector(1,nnz_dev,1,tnnz,1,queue);
*nnz = tnnz[0];
magma_free(nnz_dev);
magma_free_cpu(tnnz);
*/
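//CPU fallback: copy the per-row counts to the host, build the row pointer with an
//exclusive prefix sum, and copy it back; nnz is the last entry of the row pointer.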
magma_index_t *hrowidx, *hrowptr;
magma_index_malloc_cpu(&hrowidx, num_rows);
magma_index_malloc_cpu(&hrowptr, num_rows+1);
magma_index_getvector(num_rows,rowidx,1,hrowidx,1,queue);
hrowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
hrowptr[iter] = hrowptr[iter-1]+hrowidx[iter-1];
}
*nnz = hrowptr[num_rows];
magma_index_setvector(num_rows+1,hrowptr,1,rowptr,1,queue);
magma_free_cpu(hrowidx);
magma_free_cpu(hrowptr);
return MAGMA_SUCCESS;
}
|
bba34244abf7e234330550b7762b09717f8009a3.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#include <cuda_runtime.h>
#define PRECISION_z
__global__ void
magma_zget_row_ptr_kernel(
const magma_int_t num_rows,
magma_int_t* nnz,
const magma_index_t* __restrict__ rowidx,
magma_index_t* rowptr)
{
//int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
//int nnz;
/*magma_int_t nnz_per_row;
if(k<num_rows){
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
nnz_per_row = __ldg( rowidx + k );
#else
nnz_per_row = rowidx[k];
#endif
atomicAdd(&nnz,nnz_per_row);
}
if (k < 2)
{
if(k==1)
{
rowptr[0] = 0;
rowptr[1] = rowidx[0];
for(int iter=2;iter<(num_rows+1)/2;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
}
else{
rowptr[num_rows] = nnz;
for(int iter=num_rows-1;iter>(num_rows+1)/2;iter--){
rowptr[iter] = rowptr[iter+1]-rowidx[iter];
}
}
}
*/
//naive implementation for now.
if (k==1) {
rowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
rowptr[iter] = rowptr[iter-1]+rowidx[iter-1];
}
nnz[0] = rowptr[num_rows];
}
} //kernel
extern "C" magma_int_t
magma_zget_row_ptr(
const magma_int_t num_rows,
magma_int_t *nnz,
const magma_index_t* rowidx,
magma_index_t* rowptr,
magma_queue_t queue)
{
/*
int blocksize = 128;
int gridsize = magma_ceildiv(num_rows, blocksize);
magma_int_t *nnz_dev, *tnnz;
magma_imalloc(&nnz_dev, 1);
magma_imalloc_cpu(&tnnz, 1);
dim3 block(blocksize,1,1);
dim3 grid(gridsize,1,1);
magma_zget_row_ptr_kernel<<<grid, block, 0, queue->cuda_stream()>>>
(num_rows, nnz_dev, rowidx, rowptr);
magma_igetvector(1,nnz_dev,1,tnnz,1,queue);
*nnz = tnnz[0];
magma_free(nnz_dev);
magma_free_cpu(tnnz);
*/
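//CPU fallback: copy the per-row counts to the host, build the row pointer with an
//exclusive prefix sum, and copy it back; nnz is the last entry of the row pointer.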
magma_index_t *hrowidx, *hrowptr;
magma_index_malloc_cpu(&hrowidx, num_rows);
magma_index_malloc_cpu(&hrowptr, num_rows+1);
magma_index_getvector(num_rows,rowidx,1,hrowidx,1,queue);
hrowptr[0] = 0;
for(int iter=1;iter<=num_rows;++iter){
hrowptr[iter] = hrowptr[iter-1]+hrowidx[iter-1];
}
*nnz = hrowptr[num_rows];
magma_index_setvector(num_rows+1,hrowptr,1,rowptr,1,queue);
magma_free_cpu(hrowidx);
magma_free_cpu(hrowptr);
return MAGMA_SUCCESS;
}
|
488f5e16f799558720d8b86e4dad421a8059f8e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
__global__ void add(int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
void testmain(int size, int *c)
{
int *a, *b; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c;
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); a[0]=1;
b = (int *)malloc(size); b[0]=4;
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
// Cleanup
free(a); free(b);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return;
}
|
488f5e16f799558720d8b86e4dad421a8059f8e7.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void add(int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
void testmain(int size, int *c)
{
int *a, *b; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c;
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); a[0]=1;
b = (int *)malloc(size); b[0]=4;
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
// Cleanup
free(a); free(b);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return;
}
|
dc647a2de575001974e326f36158d65b0ae9e882.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kMartixByMatrixElementwise(const int nThreads, const float *m1, const float *m2, float *output) {
/* Computes the product of two arrays (elementwise multiplication).
Inputs:
m1: array
m2: array
output: array,the results of the multiplication are to be stored here
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = m1[i] * m2[i];
}
}
|
dc647a2de575001974e326f36158d65b0ae9e882.cu
|
#include "includes.h"
__global__ void kMartixByMatrixElementwise(const int nThreads, const float *m1, const float *m2, float *output) {
/* Computes the product of two arrays (elementwise multiplication).
Inputs:
m1: array
m2: array
output: array,the results of the multiplication are to be stored here
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = m1[i] * m2[i];
}
}
|
ef6e2d897dea8398385d54d699a53f6a0f4b9c25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/bitonicen.htm
#include <assert.h>
#include <helper_cuda.h>
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"
////////////////////////////////////////////////////////////////////////////////
// Monolithic bitonic sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void bitonicSortShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint dir
)
{
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < arrayLength; size <<= 1)
{
//Bitonic merge
uint ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (uint stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//ddd == dir for the last bitonic merge step
{
for (uint stride = arrayLength / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
dir
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Bitonic sort kernel for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
//Bottom-level bitonic sort
//Almost the same as bitonicSortShared with the exception of
//even / odd subarrays being sorted in opposite directions
//Bitonic merge accepts both
//Ascending | descending or descending | ascending sorted pairs
__global__ void bitonicSortShared1(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal
)
{
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subarray and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1)
{
//Bitonic merge
uint ddd = (threadIdx.x & (size / 2)) != 0;
for (uint stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//Odd / even arrays of SHARED_SIZE_LIMIT elements
//sorted in opposite directions
uint ddd = blockIdx.x & 1;
{
for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT
__global__ void bitonicMergeGlobal(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint stride,
uint dir
)
{
uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
uint comparatorI = global_comparatorI & (arrayLength / 2 - 1);
//Bitonic merge
uint ddd = dir ^ ((comparatorI & (size / 2)) != 0);
uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
uint keyA = d_SrcKey[pos + 0];
uint valA = d_SrcVal[pos + 0];
uint keyB = d_SrcKey[pos + stride];
uint valB = d_SrcVal[pos + stride];
Comparator(
keyA, valA,
keyB, valB,
ddd
);
d_DstKey[pos + 0] = keyA;
d_DstVal[pos + 0] = valA;
d_DstKey[pos + stride] = keyB;
d_DstVal[pos + stride] = valB;
}
//Combined bitonic merge steps for
//size > SHARED_SIZE_LIMIT and stride = [1 .. SHARED_SIZE_LIMIT / 2]
__global__ void bitonicMergeShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint dir
)
{
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
//Bitonic merge
uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
uint ddd = dir ^ ((comparatorI & (size / 2)) != 0);
for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Helper function (also used by odd-even merge sort)
extern "C" uint factorRadix2(uint *log2L, uint L)
{
if (!L)
{
*log2L = 0;
return 0;
}
else
{
for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++);
return L;
}
}
extern "C" uint bitonicSort(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint batchSize,
uint arrayLength,
uint dir
)
{
//Nothing to sort
if (arrayLength < 2)
return 0;
//Only power-of-two array lengths are supported by this implementation
uint log2L;
uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
assert(factorizationRemainder == 1);
dir = (dir != 0);
uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
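//Short arrays are sorted entirely in shared memory by bitonicSortShared.
//Longer arrays are first pre-sorted in SHARED_SIZE_LIMIT chunks with alternating
//directions, then merged by repeated bitonic merge steps, using the global-memory
//kernel while the stride is large and the shared-memory kernel once it fits.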
if (arrayLength <= SHARED_SIZE_LIMIT)
{
assert((batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0);
hipLaunchKernelGGL(( bitonicSortShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir);
}
else
{
hipLaunchKernelGGL(( bitonicSortShared1), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
for (unsigned stride = size / 2; stride > 0; stride >>= 1)
if (stride >= SHARED_SIZE_LIMIT)
{
hipLaunchKernelGGL(( bitonicMergeGlobal), dim3((batchSize * arrayLength) / 512), dim3(256), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir);
}
else
{
hipLaunchKernelGGL(( bitonicMergeShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, dir);
break;
}
}
return threadCount;
}
|
ef6e2d897dea8398385d54d699a53f6a0f4b9c25.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/bitonicen.htm
#include <assert.h>
#include <helper_cuda.h>
#include "sortingNetworks_common.h"
#include "sortingNetworks_common.cuh"
////////////////////////////////////////////////////////////////////////////////
// Monolithic bitonic sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void bitonicSortShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint dir
)
{
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < arrayLength; size <<= 1)
{
//Bitonic merge
uint ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (uint stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//ddd == dir for the last bitonic merge step
{
for (uint stride = arrayLength / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
dir
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Bitonic sort kernel for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
//Bottom-level bitonic sort
//Almost the same as bitonicSortShared with the exception of
//even / odd subarrays being sorted in opposite directions
//Bitonic merge accepts both
//Ascending | descending or descending | ascending sorted pairs
__global__ void bitonicSortShared1(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal
)
{
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subarray and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1)
{
//Bitonic merge
uint ddd = (threadIdx.x & (size / 2)) != 0;
for (uint stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//Odd / even arrays of SHARED_SIZE_LIMIT elements
//sorted in opposite directions
uint ddd = blockIdx.x & 1;
{
for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT
__global__ void bitonicMergeGlobal(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint stride,
uint dir
)
{
uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
uint comparatorI = global_comparatorI & (arrayLength / 2 - 1);
//Bitonic merge
uint ddd = dir ^ ((comparatorI & (size / 2)) != 0);
uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
uint keyA = d_SrcKey[pos + 0];
uint valA = d_SrcVal[pos + 0];
uint keyB = d_SrcKey[pos + stride];
uint valB = d_SrcVal[pos + stride];
Comparator(
keyA, valA,
keyB, valB,
ddd
);
d_DstKey[pos + 0] = keyA;
d_DstVal[pos + 0] = valA;
d_DstKey[pos + stride] = keyB;
d_DstVal[pos + stride] = valB;
}
//Combined bitonic merge steps for
//size > SHARED_SIZE_LIMIT and stride = [1 .. SHARED_SIZE_LIMIT / 2]
__global__ void bitonicMergeShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint dir
)
{
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
//Bitonic merge
uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
uint ddd = dir ^ ((comparatorI & (size / 2)) != 0);
for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Helper function (also used by odd-even merge sort)
extern "C" uint factorRadix2(uint *log2L, uint L)
{
if (!L)
{
*log2L = 0;
return 0;
}
else
{
for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++);
return L;
}
}
extern "C" uint bitonicSort(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint batchSize,
uint arrayLength,
uint dir
)
{
//Nothing to sort
if (arrayLength < 2)
return 0;
//Only power-of-two array lengths are supported by this implementation
uint log2L;
uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
assert(factorizationRemainder == 1);
dir = (dir != 0);
uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
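//Short arrays are sorted entirely in shared memory by bitonicSortShared.
//Longer arrays are first pre-sorted in SHARED_SIZE_LIMIT chunks with alternating
//directions, then merged by repeated bitonic merge steps, using the global-memory
//kernel while the stride is large and the shared-memory kernel once it fits.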
if (arrayLength <= SHARED_SIZE_LIMIT)
{
assert((batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0);
bitonicSortShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir);
}
else
{
bitonicSortShared1<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
for (unsigned stride = size / 2; stride > 0; stride >>= 1)
if (stride >= SHARED_SIZE_LIMIT)
{
bitonicMergeGlobal<<<(batchSize * arrayLength) / 512, 256>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir);
}
else
{
bitonicMergeShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, dir);
break;
}
}
return threadCount;
}
|
c60d14eb28b52c6e2c9a67c8d3c64583ec993e39.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation before all2all in backward propagation
template <typename TypeEmbeddingComp>
__global__ void backward_reorder_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const TypeEmbeddingComp *input, TypeEmbeddingComp *output) {
// blockDim.x = embedding_vec_size: each thread handles one element of the embedding vector.
// gridDim.x = batch_size / gpu_num = samples_per_gpu: each block handles one sample on its GPU.
// Each thread processes all slot_num slots of its sample.
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int src_offset = sample_id * slot_num * embedding_vec_size;
int src_stride = embedding_vec_size;
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int src_addr = src_offset + src_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
// reorder operation before all2all in backward propagation
__global__ void backward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
// blockDim.x = embedding_vec_size: each thread handles one element of the embedding vector.
// gridDim.x = batch_size / gpu_num = samples_per_gpu: each block handles one sample on its GPU.
// Each thread processes all slot_num slots of its sample.
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int src_offset = sample_id * slot_num * embedding_vec_size;
int src_stride = embedding_vec_size;
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int src_addr = src_offset + src_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( backward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
hipLaunchKernelGGL(( backward_reorder_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( backward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_backward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::backward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR
|
c60d14eb28b52c6e2c9a67c8d3c64583ec993e39.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation before all2all in backward propagation
template <typename TypeEmbeddingComp>
__global__ void backward_reorder_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const TypeEmbeddingComp *input, TypeEmbeddingComp *output) {
// blockDim.x = embedding_vec_size: each thread handles one element of the embedding vector.
// gridDim.x = batch_size / gpu_num = samples_per_gpu: each block handles one sample on its GPU.
// Each thread processes all slot_num slots of its sample.
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int src_offset = sample_id * slot_num * embedding_vec_size;
int src_stride = embedding_vec_size;
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int src_addr = src_offset + src_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
// reorder operation before all2all in backward propagation
__global__ void backward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
// blockDim.x = embedding_vec_size: each thread handles one element of the embedding vector.
// gridDim.x = batch_size / gpu_num = samples_per_gpu: each block handles one sample on its GPU.
// Each thread processes all slot_num slots of its sample.
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int src_offset = sample_id * slot_num * embedding_vec_size;
int src_stride = embedding_vec_size;
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int dst_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int src_addr = src_offset + src_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
backward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_backward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
backward_reorder_align2_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
backward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_backward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::backward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR
|
b18e3eb1a159ffe91372f36b6f8bbb57cb95c9df.hip
|
// !!! This is a file automatically generated by hipify!!!
//#ifdef enable_gpu
#include "gpus/cuda_handle_error.h"
//#include "gpus/cusparse_spmm.h"
#include "gpus/gpu_csr_kernel.h"
#include "gpus/timer.h"
#include "tools/ntimer.h"
#include "sort_network.cuh"
//#include "large.cuh"
#include "radix_sort.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <assert.h>
//#endif
int inline qmin(const int a, const int b) {
if (a < b) return a;
return b;
}
__device__ inline int dqueueID(long x) {
if (x == 0) return 0;
else if (x == 1) return 1;
else if (x > 1024) return 12;
int ret = 2;
int up = 2;
for (up = 2; ; up *= 2, ++ret) {
if (x <= up) return ret;
}
}
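// Bin layout produced by dqueueID above: 0 -> bin 0, 1 -> bin 1, 2 -> bin 2, 3-4 -> bin 3,
// 5-8 -> bin 4, ..., 513-1024 -> bin 11, and anything above 1024 -> bin 12.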
template<int BLOCK_THREADS>
__global__ void printFlops(const int IA[],const int JA[], const int IB[], const int drowids[], const int gcount, unsigned int row_count_array[]) {
for(int k = blockIdx.x;k<gcount;k+=gridDim.x) {
int rowId = drowids[k];
int endRow = IA[rowId + 1];
for(int i = threadIdx.x+IA[rowId];i<endRow;i+=blockDim.x) {
int a = JA[i];
long flops = IB[a+1] - IB[a];
int index = dqueueID(flops);
atomicAdd(&row_count_array[index],1);
}
}
}
void count_row_flops(const CSR &dA, const CSR &dB, int *drowIds, const vector<int> &hv) {
unsigned int *drow_count_array;
unsigned int hrow_count_array[13];
HANDLE_ERROR(hipMalloc((void**)&drow_count_array, 13 * sizeof(unsigned int)));
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
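// Note: judging from the calls below, hv appears to hold prefix offsets into drowIds,
// so the rows of bin i occupy drowIds[hv[i] .. hv[i+1]); bin 12 is read from
// hv[63] .. hv[64], so hv is expected to carry at least 65 entries.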
int blocks = qmin(65535, hv[2] - hv[1]);
int threads = 256;
printf("Binwise distribution of per element for bin 1 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[1], hv[2] - hv[1], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[3] - hv[2]);
threads = 256;
printf("Binwise distribution of per element for bin 2 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[2], hv[3] - hv[2], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[4] - hv[3]);
threads = 256;
printf("Binwise distribution of per element for bin 3 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[3], hv[4] - hv[3], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[5] - hv[4]);
threads = 256;
printf("Binwise distribution of per element for bin 4 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[4], hv[5] - hv[4], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[6] - hv[5]);
threads = 256;
printf("Binwise distribution of per element for bin 5 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[5], hv[6] - hv[5], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[7] - hv[6]);
threads = 256;
printf("Binwise distribution of per element for bin 6 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[6], hv[7] - hv[6], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[8] - hv[7]);
threads = 256;
printf("Binwise distribution of per element for bin 7 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[7], hv[8] - hv[7], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[9] - hv[8]);
threads = 256;
printf("Binwise distribution of per element for bin 8 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[8], hv[9] - hv[8], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[10] - hv[9]);
threads = 256;
printf("Binwise distribution of per element for bin 9 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[9], hv[10] - hv[9], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[11] - hv[10]);
threads = 256;
printf("Binwise distribution of per element for bin 10 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[10], hv[11] - hv[10], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[12] - hv[11]);
threads = 256;
printf("Binwise distribution of per element for bin 11 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[11], hv[12] - hv[11], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[64] - hv[63]);
threads = 256;
printf("Binwise distribution of per element for bin 12 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[63], hv[64] - hv[63], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[1] - hv[0]);
threads = 256;
printf("Binwise distribution of per element for bin 0 \n");
hipLaunchKernelGGL(( printFlops<256>), dim3(blocks),dim3(threads), 0, 0, dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[0], hv[1] - hv[0], drow_count_array);
HANDLE_ERROR(hipMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(hipFree(drow_count_array));
}
|
b18e3eb1a159ffe91372f36b6f8bbb57cb95c9df.cu
|
//#ifdef enable_gpu
#include "gpus/cuda_handle_error.h"
//#include "gpus/cusparse_spmm.h"
#include "gpus/gpu_csr_kernel.h"
#include "gpus/timer.h"
#include "tools/ntimer.h"
#include "sort_network.cuh"
//#include "large.cuh"
#include "radix_sort.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <assert.h>
//#endif
int inline qmin(const int a, const int b) {
if (a < b) return a;
return b;
}
__device__ inline int dqueueID(long x) {
if (x == 0) return 0;
else if (x == 1) return 1;
else if (x > 1024) return 12;
int ret = 2;
int up = 2;
for (up = 2; ; up *= 2, ++ret) {
if (x <= up) return ret;
}
}
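// Bin layout produced by dqueueID above: 0 -> bin 0, 1 -> bin 1, 2 -> bin 2, 3-4 -> bin 3,
// 5-8 -> bin 4, ..., 513-1024 -> bin 11, and anything above 1024 -> bin 12.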
template<int BLOCK_THREADS>
__global__ void printFlops(const int IA[],const int JA[], const int IB[], const int drowids[], const int gcount, unsigned int row_count_array[]) {
for(int k = blockIdx.x;k<gcount;k+=gridDim.x) {
int rowId = drowids[k];
int endRow = IA[rowId + 1];
for(int i = threadIdx.x+IA[rowId];i<endRow;i+=blockDim.x) {
int a = JA[i];
long flops = IB[a+1] - IB[a];
int index = dqueueID(flops);
atomicAdd(&row_count_array[index],1);
}
}
}
void count_row_flops(const CSR &dA, const CSR &dB, int *drowIds, const vector<int> &hv) {
unsigned int *drow_count_array;
unsigned int hrow_count_array[13];
HANDLE_ERROR(cudaMalloc((void**)&drow_count_array, 13 * sizeof(unsigned int)));
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
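// Note: judging from the calls below, hv appears to hold prefix offsets into drowIds,
// so the rows of bin i occupy drowIds[hv[i] .. hv[i+1]); bin 12 is read from
// hv[63] .. hv[64], so hv is expected to carry at least 65 entries.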
int blocks = qmin(65535, hv[2] - hv[1]);
int threads = 256;
printf("Binwise distribution of per element for bin 1 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[1], hv[2] - hv[1], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[3] - hv[2]);
threads = 256;
printf("Binwise distribution of per element for bin 2 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[2], hv[3] - hv[2], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[4] - hv[3]);
threads = 256;
printf("Binwise distribution of per element for bin 3 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[3], hv[4] - hv[3], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[5] - hv[4]);
threads = 256;
printf("Binwise distribution of per element for bin 4 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[4], hv[5] - hv[4], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[6] - hv[5]);
threads = 256;
printf("Binwise distribution of per element for bin 5 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[5], hv[6] - hv[5], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[7] - hv[6]);
threads = 256;
printf("Binwise distribution of per element for bin 6 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[6], hv[7] - hv[6], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[8] - hv[7]);
threads = 256;
printf("Binwise distribution of per element for bin 7 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[7], hv[8] - hv[7], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[9] - hv[8]);
threads = 256;
printf("Binwise distribution of per element for bin 8 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[8], hv[9] - hv[8], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[10] - hv[9]);
threads = 256;
printf("Binwise distribution of per element for bin 9 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[9], hv[10] - hv[9], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[11] - hv[10]);
threads = 256;
printf("Binwise distribution of per element for bin 10 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[10], hv[11] - hv[10], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[12] - hv[11]);
threads = 256;
printf("Binwise distribution of per element for bin 11 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[11], hv[12] - hv[11], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[64] - hv[63]);
threads = 256;
printf("Binwise distribution of per element for bin 12 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[63], hv[64] - hv[63], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaMemset(drow_count_array,0,13 * sizeof(unsigned int)));
blocks = qmin(65535, hv[1] - hv[0]);
threads = 256;
printf("Binwise distribution of per element for bin 0 \n");
printFlops<256><<<blocks,threads>>>(dA.rowPtr,dA.colInd,dB.rowPtr,drowIds + hv[0], hv[1] - hv[0], drow_count_array);
HANDLE_ERROR(cudaMemcpy((void*) hrow_count_array, (void*) drow_count_array, 13 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaGetLastError());
for(int i=1;i<13;i++)
printf("count %d : %lu \n" , i, hrow_count_array[i]);
HANDLE_ERROR(cudaFree(drow_count_array));
}
|
afe4c33a3c653d33223d257ae00d889fde7e4338.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
* done nothing
*/
__global__ void kernel(void) {}
/**
* Host main routine
*/
int main(void)
{
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
printf("Hello World\n");
return 0;
}
|
afe4c33a3c653d33223d257ae00d889fde7e4338.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
* done nothing
*/
__global__ void kernel(void) {}
/**
* Host main routine
*/
int main(void)
{
kernel<<<1,1>>>();
printf("Hello World\n");
return 0;
}
|
244a00caa69f48772e680d2a97fb3379504ffdf6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ResultSetSortImpl.h"
#include "BufferCompaction.h"
#include "GpuMemUtils.h"
#include "GpuRtConstants.h"
#include "ResultSetBufferAccessors.h"
#include "SortUtils.cuh"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#define FORCE_CPU_VERSION
#include "BufferEntryUtils.h"
#undef FORCE_CPU_VERSION
namespace {
template <class K, class V, class I>
std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type,
ThrustAllocator& thrust_allocator,
const int8_t* groupby_buffer,
V dev_oe_col_buffer_begin,
V dev_oe_col_buffer_end,
I dev_idx_buff_begin,
const size_t dev_idx_buff_size,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n) {
if (dev_idx_buff_size == 0) {
return {};
}
if (oe.is_desc) {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(thrust::device(thrust_allocator),
dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin,
thrust::greater<int64_t>());
} else {
thrust::sort_by_key(
dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>());
}
} else {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(
thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin);
} else {
thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin);
}
}
// Speculatively transfer only the first top_n entries; most of the time it'll be enough.
thrust::host_vector<uint32_t> host_vector_result(dev_idx_buff_begin,
dev_idx_buff_begin + std::min(top_n, dev_idx_buff_size));
// Sometimes, radix sort can bring to the front entries which are empty.
// For example, ascending sort on COUNT(*) will bring non-existent groups
// to the front of dev_idx_buff since they're 0 in our system. Re-do the
// transfer in that case to bring the entire dev_idx_buff; existing logic
// in row iteration will take care of skipping the empty rows.
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
host_vector_result = thrust::host_vector<uint32_t>(dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size);
break;
}
}
std::vector<uint32_t> result;
result.reserve(std::min(top_n, host_vector_result.size()));
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
result.push_back(entry_idx);
if (result.size() >= top_n) {
break;
}
}
}
return result;
}
void add_nulls(std::vector<uint32_t>& idx_buff, const std::vector<uint32_t>& null_idx_buff, const PodOrderEntry& oe) {
if (null_idx_buff.empty()) {
return;
}
const auto insertion_point = oe.nulls_first ? idx_buff.begin() : idx_buff.end();
idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end());
}
template <typename T>
thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec, ThrustAllocator& thrust_allocator) {
if (host_vec.empty()) {
return thrust::device_ptr<T>(static_cast<T*>(nullptr));
}
const auto host_vec_bytes = host_vec.size() * sizeof(T);
T* dev_ptr = reinterpret_cast<T*>(thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes)));
copy_to_gpu(thrust_allocator.getDataMgr(),
reinterpret_cast<hipDeviceptr_t>(dev_ptr),
&host_vec[0],
host_vec_bytes,
thrust_allocator.getDeviceId());
return thrust::device_ptr<T>(dev_ptr);
}
template <class K>
std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
thrust::host_vector<uint32_t> neg_idx_buff;
thrust::host_vector<uint32_t> pos_idx_buff;
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<int64_t> neg_oe_col_buffer;
thrust::host_vector<int64_t> pos_oe_col_buffer;
const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
neg_idx_buff.reserve(slice_entry_count);
pos_idx_buff.reserve(slice_entry_count);
null_idx_buff.reserve(slice_entry_count);
neg_oe_col_buffer.reserve(slice_entry_count);
pos_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
const auto& oe_info = layout.oe_target_info;
const auto col_ti = oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type;
// Exclude AVG b/c collect_order_entry_column already makes its pair collapse into a double
const bool float_argument_input = takes_float_argument(oe_info) && oe_info.agg_kind != kAVG;
auto is_negative = float_argument_input ? [](const int64_t v) -> bool { return (v & (1 << 31)) != 0; }
: [](const int64_t v) -> bool { return v < 0; };
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) {
null_idx_buff.push_back(i);
continue;
}
if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for integer and floating point
neg_idx_buff.push_back(i);
neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
} else {
pos_idx_buff.push_back(i);
pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> pos_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator);
const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_pos_oe_col_buffer,
dev_pos_oe_col_buffer + pos_oe_col_buffer.size(),
dev_pos_idx_buff,
pos_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
pos_oe_col_buffer.begin(),
pos_oe_col_buffer.end(),
pos_idx_buff.begin(),
pos_idx_buff.size(),
oe,
layout,
top_n);
}
std::vector<uint32_t> neg_result;
PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first};
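// Rationale for flipping the direction: for negative IEEE-754 values the integer bit
// pattern grows with the magnitude, so an ascending integer sort of the negative bucket
// yields descending float order. Sorting that bucket with the opposite direction and
// concatenating it with the positive bucket restores the requested order.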
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator);
const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_neg_oe_col_buffer,
dev_neg_oe_col_buffer + neg_oe_col_buffer.size(),
dev_neg_idx_buff,
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
neg_oe_col_buffer.begin(),
neg_oe_col_buffer.end(),
neg_idx_buff.begin(),
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
}
if (oe.is_desc) {
pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end());
add_nulls(pos_result, null_idx_buff, oe);
return pos_result;
}
neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end());
add_nulls(neg_result, null_idx_buff, oe);
return neg_result;
}
template <class K>
std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
const auto& entry_ti = get_compact_type(layout.oe_target_info);
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<uint32_t> notnull_idx_buff;
const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
null_idx_buff.reserve(slice_entry_count);
notnull_idx_buff.reserve(slice_entry_count);
thrust::host_vector<int64_t> notnull_oe_col_buffer;
notnull_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) {
null_idx_buff.push_back(i);
} else {
notnull_idx_buff.push_back(i);
notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> notnull_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator);
const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator);
notnull_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_notnull_oe_col_buffer,
dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(),
dev_notnull_idx_buff,
notnull_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
notnull_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
notnull_oe_col_buffer.begin(),
notnull_oe_col_buffer.end(),
notnull_idx_buff.begin(),
notnull_idx_buff.size(),
oe,
layout,
top_n);
}
add_nulls(notnull_result, null_idx_buff, oe);
return notnull_result;
}
template <class K>
thrust::host_vector<int64_t> collect_order_entry_column(const int8_t* groupby_buffer,
const GroupByBufferLayoutInfo& layout,
const size_t start,
const size_t step) {
thrust::host_vector<int64_t> oe_col_buffer;
const auto row_ptr = groupby_buffer + start * layout.row_bytes;
auto crt_group_ptr1 =
layout.target_groupby_index >= 0 ? row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off;
const int8_t* crt_group_ptr2{nullptr};
if (layout.oe_target_info.agg_kind == kAVG) {
crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes;
}
const auto& entry_ti = get_compact_type(layout.oe_target_info);
const bool float_argument_input = takes_float_argument(layout.oe_target_info);
const auto step_bytes = layout.row_bytes * step;
for (size_t i = start; i < layout.entry_count; i += step) {
auto val1 = read_int_from_buff(crt_group_ptr1, layout.col_bytes > 0 ? layout.col_bytes : sizeof(K));
if (crt_group_ptr2) {
const auto val2 = read_int_from_buff(crt_group_ptr2, 8);
const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input);
val1 = *reinterpret_cast<const int64_t*>(&avg_val);
}
oe_col_buffer.push_back(val1);
crt_group_ptr1 += step_bytes;
if (crt_group_ptr2) {
crt_group_ptr2 += step_bytes;
}
}
return oe_col_buffer;
}
} // namespace
template <class K>
std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step);
const auto& entry_ti = get_compact_type(layout.oe_target_info);
CHECK(entry_ti.is_number());
if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) {
return baseline_sort_fp<K>(
device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step);
}
// Because of how we represent nulls for integral types, they'd be at the
// wrong position in these two cases. Separate them into a different buffer.
if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) {
return baseline_sort_int<K>(
device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step);
}
ThrustAllocator thrust_allocator(data_mgr, device_id);
// Fastest path, no need to separate nulls away since they'll end up at the
// right place as a side effect of how we're representing nulls.
if (device_type == ExecutorDeviceType::GPU) {
if (oe_col_buffer.empty()) {
return {};
}
const auto dev_idx_buff = get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator);
thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step);
const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_oe_col_buffer,
dev_oe_col_buffer + oe_col_buffer.size(),
dev_idx_buff,
oe_col_buffer.size(),
oe,
layout,
top_n);
}
CHECK(device_type == ExecutorDeviceType::CPU);
thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size());
thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
oe_col_buffer.begin(),
oe_col_buffer.end(),
host_idx_buff.begin(),
host_idx_buff.size(),
oe,
layout,
top_n);
}
template std::vector<uint32_t> baseline_sort<int32_t>(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
template std::vector<uint32_t> baseline_sort<int64_t>(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
|
244a00caa69f48772e680d2a97fb3379504ffdf6.cu
|
#include "ResultSetSortImpl.h"
#include "BufferCompaction.h"
#include "GpuMemUtils.h"
#include "GpuRtConstants.h"
#include "ResultSetBufferAccessors.h"
#include "SortUtils.cuh"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#define FORCE_CPU_VERSION
#include "BufferEntryUtils.h"
#undef FORCE_CPU_VERSION
namespace {
template <class K, class V, class I>
std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type,
ThrustAllocator& thrust_allocator,
const int8_t* groupby_buffer,
V dev_oe_col_buffer_begin,
V dev_oe_col_buffer_end,
I dev_idx_buff_begin,
const size_t dev_idx_buff_size,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n) {
if (dev_idx_buff_size == 0) {
return {};
}
if (oe.is_desc) {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(thrust::device(thrust_allocator),
dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin,
thrust::greater<int64_t>());
} else {
thrust::sort_by_key(
dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>());
}
} else {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(
thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin);
} else {
thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin);
}
}
// Speculatively transfer only the first top_n entries; most of the time it'll be enough.
thrust::host_vector<uint32_t> host_vector_result(dev_idx_buff_begin,
dev_idx_buff_begin + std::min(top_n, dev_idx_buff_size));
// Sometimes, radix sort can bring to the front entries which are empty.
// For example, ascending sort on COUNT(*) will bring non-existent groups
// to the front of dev_idx_buff since they're 0 in our system. Re-do the
// transfer in that case to bring the entire dev_idx_buff; existing logic
// in row iteration will take care of skipping the empty rows.
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
host_vector_result = thrust::host_vector<uint32_t>(dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size);
break;
}
}
std::vector<uint32_t> result;
result.reserve(std::min(top_n, host_vector_result.size()));
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
result.push_back(entry_idx);
if (result.size() >= top_n) {
break;
}
}
}
return result;
}
void add_nulls(std::vector<uint32_t>& idx_buff, const std::vector<uint32_t>& null_idx_buff, const PodOrderEntry& oe) {
if (null_idx_buff.empty()) {
return;
}
const auto insertion_point = oe.nulls_first ? idx_buff.begin() : idx_buff.end();
idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end());
}
template <typename T>
thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec, ThrustAllocator& thrust_allocator) {
if (host_vec.empty()) {
return thrust::device_ptr<T>(static_cast<T*>(nullptr));
}
const auto host_vec_bytes = host_vec.size() * sizeof(T);
T* dev_ptr = reinterpret_cast<T*>(thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes)));
copy_to_gpu(thrust_allocator.getDataMgr(),
reinterpret_cast<CUdeviceptr>(dev_ptr),
&host_vec[0],
host_vec_bytes,
thrust_allocator.getDeviceId());
return thrust::device_ptr<T>(dev_ptr);
}
template <class K>
std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
thrust::host_vector<uint32_t> neg_idx_buff;
thrust::host_vector<uint32_t> pos_idx_buff;
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<int64_t> neg_oe_col_buffer;
thrust::host_vector<int64_t> pos_oe_col_buffer;
const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
neg_idx_buff.reserve(slice_entry_count);
pos_idx_buff.reserve(slice_entry_count);
null_idx_buff.reserve(slice_entry_count);
neg_oe_col_buffer.reserve(slice_entry_count);
pos_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
const auto& oe_info = layout.oe_target_info;
const auto col_ti = oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type;
// Exclude AVG b/c collect_order_entry_column already makes its pair collapse into a double
const bool float_argument_input = takes_float_argument(oe_info) && oe_info.agg_kind != kAVG;
auto is_negative = float_argument_input ? [](const int64_t v) -> bool { return (v & (1 << 31)) != 0; }
: [](const int64_t v) -> bool { return v < 0; };
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) {
null_idx_buff.push_back(i);
continue;
}
if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for integer and floating point
neg_idx_buff.push_back(i);
neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
} else {
pos_idx_buff.push_back(i);
pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> pos_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator);
const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_pos_oe_col_buffer,
dev_pos_oe_col_buffer + pos_oe_col_buffer.size(),
dev_pos_idx_buff,
pos_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
pos_oe_col_buffer.begin(),
pos_oe_col_buffer.end(),
pos_idx_buff.begin(),
pos_idx_buff.size(),
oe,
layout,
top_n);
}
std::vector<uint32_t> neg_result;
PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first};
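// Rationale for flipping the direction: for negative IEEE-754 values the integer bit
// pattern grows with the magnitude, so an ascending integer sort of the negative bucket
// yields descending float order. Sorting that bucket with the opposite direction and
// concatenating it with the positive bucket restores the requested order.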
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator);
const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_neg_oe_col_buffer,
dev_neg_oe_col_buffer + neg_oe_col_buffer.size(),
dev_neg_idx_buff,
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
neg_oe_col_buffer.begin(),
neg_oe_col_buffer.end(),
neg_idx_buff.begin(),
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
}
if (oe.is_desc) {
pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end());
add_nulls(pos_result, null_idx_buff, oe);
return pos_result;
}
neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end());
add_nulls(neg_result, null_idx_buff, oe);
return neg_result;
}
template <class K>
std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
const auto& entry_ti = get_compact_type(layout.oe_target_info);
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<uint32_t> notnull_idx_buff;
const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
null_idx_buff.reserve(slice_entry_count);
notnull_idx_buff.reserve(slice_entry_count);
thrust::host_vector<int64_t> notnull_oe_col_buffer;
notnull_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) {
null_idx_buff.push_back(i);
} else {
notnull_idx_buff.push_back(i);
notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> notnull_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator);
const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator);
notnull_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_notnull_oe_col_buffer,
dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(),
dev_notnull_idx_buff,
notnull_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
notnull_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
notnull_oe_col_buffer.begin(),
notnull_oe_col_buffer.end(),
notnull_idx_buff.begin(),
notnull_idx_buff.size(),
oe,
layout,
top_n);
}
add_nulls(notnull_result, null_idx_buff, oe);
return notnull_result;
}
template <class K>
thrust::host_vector<int64_t> collect_order_entry_column(const int8_t* groupby_buffer,
const GroupByBufferLayoutInfo& layout,
const size_t start,
const size_t step) {
thrust::host_vector<int64_t> oe_col_buffer;
const auto row_ptr = groupby_buffer + start * layout.row_bytes;
auto crt_group_ptr1 =
layout.target_groupby_index >= 0 ? row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off;
const int8_t* crt_group_ptr2{nullptr};
if (layout.oe_target_info.agg_kind == kAVG) {
crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes;
}
const auto& entry_ti = get_compact_type(layout.oe_target_info);
const bool float_argument_input = takes_float_argument(layout.oe_target_info);
const auto step_bytes = layout.row_bytes * step;
for (size_t i = start; i < layout.entry_count; i += step) {
auto val1 = read_int_from_buff(crt_group_ptr1, layout.col_bytes > 0 ? layout.col_bytes : sizeof(K));
if (crt_group_ptr2) {
const auto val2 = read_int_from_buff(crt_group_ptr2, 8);
const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input);
val1 = *reinterpret_cast<const int64_t*>(&avg_val);
}
oe_col_buffer.push_back(val1);
crt_group_ptr1 += step_bytes;
if (crt_group_ptr2) {
crt_group_ptr2 += step_bytes;
}
}
return oe_col_buffer;
}
} // namespace
template <class K>
std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step);
const auto& entry_ti = get_compact_type(layout.oe_target_info);
CHECK(entry_ti.is_number());
if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) {
return baseline_sort_fp<K>(
device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step);
}
// Because of how we represent nulls for integral types, they'd be at the
// wrong position in these two cases. Separate them into a different buffer.
if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) {
return baseline_sort_int<K>(
device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step);
}
ThrustAllocator thrust_allocator(data_mgr, device_id);
// Fastest path, no need to separate nulls away since they'll end up at the
// right place as a side effect of how we're representing nulls.
if (device_type == ExecutorDeviceType::GPU) {
if (oe_col_buffer.empty()) {
return {};
}
const auto dev_idx_buff = get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator);
thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step);
const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_oe_col_buffer,
dev_oe_col_buffer + oe_col_buffer.size(),
dev_idx_buff,
oe_col_buffer.size(),
oe,
layout,
top_n);
}
CHECK(device_type == ExecutorDeviceType::CPU);
thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size());
thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
oe_col_buffer.begin(),
oe_col_buffer.end(),
host_idx_buff.begin(),
host_idx_buff.size(),
oe,
layout,
top_n);
}
template std::vector<uint32_t> baseline_sort<int32_t>(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
template std::vector<uint32_t> baseline_sort<int64_t>(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
|
5c1cc38082a5eeaf0f98a9f7428ec8bdb92da1ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../nvmatrix/include/nvmatrix.cuh"
#include "../include/cudaconv2.cuh"
__device__ __forceinline__ void filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(int fPidx, int imgLoadModPosY, int imgLoadModPosX,
int imgSizeX, int filterSize, int& iPidx) {
int x = imgLoadModPosX + (fPidx) % filterSize;
int y = imgLoadModPosY + (fPidx) / filterSize;
iPidx = y >= 0 && y < imgSizeX && x >= 0 && x < imgSizeX ? y * imgSizeX + x : -1;
}
#define FA_COLOR3_IMPRELOAD(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : mm[c * imgPixels * imgStride + i * B_X];
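/*
 * FA_COLOR3_IMPRELOAD(c, i) refills the register imPreload[c][i] with the next cached
 * pixel (iPidxNext) for color channel c and the i-th image handled by this thread,
 * reading through the precomputed pointer mm; it stores 0 when the next pixel falls
 * outside the image or the image index is out of range. The invocations are interleaved
 * with the compute loops below to hide global-memory latency.
 */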
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numModules = numModulesX * numModulesY;
// Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) { // NOTE: UNTESTED!
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
int iPidxNext;
float imPreload[numColors][imgsPerThread];
float fPreload[numColors][pixelCache*filtersPerThread/B_X];
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int p = 0; p < pixelCache; p += B_X/filtersPerThread) {
if (p + shFilterLoadY < filterPixels) {
fPreload[c][p*filtersPerThread/B_X] = filters[p * numFilters + c * numFilters * filterPixels];
} else{
fPreload[c][p*filtersPerThread/B_X] = 0;
}
}
}
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) {
imPreload[c][i] = images[(c * imgPixels + iPidxNext) * imgStride + i * B_X];
} else {
imPreload[c][i] = 0;
}
}
}
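// Main loop over the filter pixels, pixelCache at a time: each iteration publishes the
// preloaded registers to shared memory, issues register preloads for the next chunk of
// image and filter values, and accumulates the partial products, overlapping global
// memory traffic with computation.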
for (int p = 0; p < filterPixels; p += pixelCache) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i];
}
}
const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache;
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
const float* ff = &filters[numFilters * fPidxNext];
const float* mm = &images[imgStride * iPidxNext];
FA_COLOR3_IMPRELOAD(1,0);
FA_COLOR3_IMPRELOAD(1,1);
FA_COLOR3_IMPRELOAD(1,2);
FA_COLOR3_IMPRELOAD(1,3);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int pp = 0; pp < pixelCache; pp += B_X/filtersPerThread) {
shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp*filtersPerThread/B_X];
}
}
__syncthreads();
FA_COLOR3_IMPRELOAD(0,0);
FA_COLOR3_IMPRELOAD(0,1);
FA_COLOR3_IMPRELOAD(0,2);
FA_COLOR3_IMPRELOAD(0,3);
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f];
}
}
}
}
FA_COLOR3_IMPRELOAD(2,0);
FA_COLOR3_IMPRELOAD(2,1);
FA_COLOR3_IMPRELOAD(2,2);
FA_COLOR3_IMPRELOAD(2,3);
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int pp = 2; pp < pixelCache; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f];
}
}
}
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
fPreload[c][pp] = fPidxNext + pp*(B_X/filtersPerThread) + shFilterLoadY >= filterPixels ? 0 : ff[c * numFilters* filterPixels + pp*(B_X/filtersPerThread) * numFilters];
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* This won't be pretty.
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numModules = numModulesX * numModulesY;
// Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int warp = tidx / 32;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) { // NOTE: UNTESTED!
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
int iPidxNext;
float imPreload[numColors][imgsPerThread];
float fPreload[numColors][DIVUP(pixelCache*filtersPerThread,B_X)];
if (warp < 3) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int p = 0; p < pixelCache; p += 2) {
if (p + shFilterLoadY < filterPixels) {
fPreload[c][p/2] = filters[p * numFilters + c * numFilters * filterPixels];
} else {
fPreload[c][p/2] = 0;
}
}
}
}
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) {
imPreload[c][i] = images[(c * imgPixels + iPidxNext) * imgStride + i * B_X];
} else {
imPreload[c][i] = 0;
}
}
}
for (int p = 0; p < filterPixels; p += pixelCache) {
const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache;
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i];
}
}
if (warp < 3) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int pp = 0; pp < pixelCache; pp += 2) {
shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp/2];
}
}
}
__syncthreads();
const float* ff = &filters[numFilters * fPidxNext];
const float* mm = &images[imgStride * iPidxNext];
FA_COLOR3_IMPRELOAD(2,2);
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][pp][tx * imgsPerThread + i] * shFilters[0][pp][ty * filtersPerThread + f];
}
}
}
FA_COLOR3_IMPRELOAD(0,0);
FA_COLOR3_IMPRELOAD(2,0);
FA_COLOR3_IMPRELOAD(1,0);
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][pp][tx * imgsPerThread + i] * shFilters[1][pp][ty * filtersPerThread + f];
}
}
}
FA_COLOR3_IMPRELOAD(0,3);
FA_COLOR3_IMPRELOAD(1,3);
FA_COLOR3_IMPRELOAD(2,3);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][0][tx * imgsPerThread + i] * shFilters[2][0][ty * filtersPerThread + f];
}
}
FA_COLOR3_IMPRELOAD(1,2);
FA_COLOR3_IMPRELOAD(0,2);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][1][tx * imgsPerThread + i] * shFilters[2][1][ty * filtersPerThread + f];
}
}
FA_COLOR3_IMPRELOAD(1,1);
FA_COLOR3_IMPRELOAD(0,1);
FA_COLOR3_IMPRELOAD(2,1);
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int pp = 2; pp < pixelCache; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f];
}
}
}
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
fPreload[c][pp] = warp >= 3 || fPidxNext + pp*2 + shFilterLoadY >= filterPixels ? 0 : ff[c * numFilters* filterPixels + pp*2 * numFilters];
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops costs 2 registers, but saves time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
__device__ inline void filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(int filterSize, int imgSizeX, int imgLoadModPosY, int imgLoadModPosX, int imgY, int imgX, int& fPidx, int& iPidx) {
int filterPxY = imgY - imgLoadModPosY;
int filterPxX = imgX - imgLoadModPosX;
fPidx = filterPxY * filterSize + filterPxX;
iPidx = imgY * imgSizeX + imgX; // Pixel index in img
}
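/*
 * Worked example with hypothetical numbers: filterSize = 5, imgSizeX = 32,
 * imgLoadModPosY = imgLoadModPosX = -2 (module 0 with paddingStart = -2), imgY = 1, imgX = 2:
 *   fPidx = (1 - (-2)) * 5 + (2 - (-2)) = 19   // filter pixel index
 *   iPidx = 1 * 32 + 2 = 34                    // image pixel index
 */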
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
 * Note: in git there's a 1.5% faster version of this which uses 167 registers instead of 154...
* it's basically the same thing, but it doesn't do the next-pixel computation. It just avoids
* pre-loading when it rolls over to the next pixel.
*/
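/*
 * The kernel below software-pipelines its global loads: while the products for the
 * current pixel/color chunk are accumulated out of shared memory, the images and
 * filters for the next chunk are already being fetched into the imPreload/fPreload
 * registers. The setPixelCoords helper above is used to compute the (fPidx, iPidx)
 * pair of the *next* pixel so that those prefetches know where to read.
 */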
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
    // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
    filters += blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numFilterColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
// float fCache[filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
// NOTE: these max/min functions increase register usage as compared to my macros
const int imgStartX = max(0, imgLoadModPosX);
const int imgStartY = max(0, imgLoadModPosY);
const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
int fPidx, iPidx;
float imPreload[imgsPerThread];
float fPreload[colorCache*filtersPerThread/B_X];
// float fCache[filtersPerThread];
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
imPreload[i] = images[imgStride * iPidx + i * B_X];
} else {
imPreload[i] = 0;
}
}
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage..
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
fPreload[c*filtersPerThread/B_X] = filters[(c * filterPixels + fPidx) * numFilters];
}
}
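    // Walk the part of the filter's receptive field that falls inside the image,
    // one pixel at a time; for each pixel, loop over the colors colorCache at a time.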
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
// const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
// const int filterPxX = imgX - imgLoadModPosX;
// const int p = filterPxY * filterSize + filterPxX;
// const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
// setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx);
// float* m = &images[imgStride * pixIdx];
const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1;
int imgYNext = imgY;
int imgXNext = imgX;
int fPidxNext, iPidxNext;
if (!lastPixel) {
imgYNext = imgY + (imgX + 1 == imgEndX);
imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1;
}
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext);
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)];
const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)];
if (oc == numFilterColors - colorCache) {
ff = &filters[fPidxNext * numFilters];
mm = &images[iPidxNext * imgStride];
fPidx = fPidxNext;
iPidx = iPidxNext;
}
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X];
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[ty][tx * imgsPerThread + i] = imPreload[i];
}
imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : mm[0 * B_X];
imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : mm[1 * B_X];
imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : mm[2 * B_X];
__syncthreads();
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f];
}
}
fPreload[0] = ff[0];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f];
}
}
fPreload[1] = ff[(B_X/filtersPerThread * filterPixels) * numFilters];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f];
}
}
imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : mm[3 * B_X];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f];
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
*/
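/*
 * The variant below reads images and filters through hipTextureObject_t handles
 * (see images.getTextureObject() in the dispatch code further down). A minimal
 * sketch of binding such an object over an existing linear float buffer -- this is
 * an assumption about what NVMatrix::getTextureObject() does internally, not code
 * taken from it:
 *
 *   hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc));
 *   resDesc.resType = hipResourceTypeLinear;
 *   resDesc.res.linear.devPtr = devData;                   // float* device pointer
 *   resDesc.res.linear.desc = hipCreateChannelDesc<float>();
 *   resDesc.res.linear.sizeInBytes = numBytes;             // must stay below TEXTURE_SIZE_MAX
 *   hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc));
 *   texDesc.readMode = hipReadModeElementType;
 *   hipTextureObject_t tex;
 *   hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
 *   // tex can then be passed to the *_tex kernel and read with tex1Dfetch<float>().
 */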
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex(hipTextureObject_t images, hipTextureObject_t filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
    // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
// images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
const int filterOffset = blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 0 : moduleIdx * numFilterColors * filterPixels * numFilters);
// filters +=blockFilterIdx
// + shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
// if (!conv) {
// filters += moduleIdx * numFilterColors * filterPixels * numFilters;
// }
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
// float fCache[filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
// NOTE: these max/min functions increase register usage as compared to my macros
const int imgStartX = max(0, imgLoadModPosX);
const int imgStartY = max(0, imgLoadModPosY);
const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
int fPidx, iPidx;
float imPreload[imgsPerThread]; // [4]
float fPreload[colorCache*filtersPerThread/B_X]; // [2]
// float fCache[filtersPerThread];
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X);
} else {
imPreload[i] = 0;
}
}
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage..
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters);
}
}
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
// const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
// const int filterPxX = imgX - imgLoadModPosX;
// const int p = filterPxY * filterSize + filterPxX;
// const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
// setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx);
// float* m = &images[imgStride * pixIdx];
const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1;
int imgYNext = imgY;
int imgXNext = imgX;
int fPidxNext, iPidxNext;
if (!lastPixel) {
imgYNext = imgY + (imgX + 1 == imgEndX);
imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1;
}
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext);
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
// const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)];
// const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)];
int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx);
int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx);
if (oc == numFilterColors - colorCache) {
filterOffset2 = filterOffset + fPidxNext * numFilters;
imgOffset2 = imgOffset + iPidxNext * imgStride;
fPidx = fPidxNext;
iPidx = iPidxNext;
}
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X];
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[ty][tx * imgsPerThread + i] = imPreload[i];
}
imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X);
imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X);
imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X);
__syncthreads();
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f];
}
}
fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f];
}
}
fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f];
}
}
imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f];
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of module and B_Y * filtersPerThread
*
* images: (numColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
*
* Number of filters per module should be divisible by B_Y * filtersPerThread
* checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread
*
* The imgSize here is the size of the actual image without the padding.
*
*/
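/*
 * Concrete shape example with hypothetical numbers: numImages = 128, numFilters = 64,
 * numModules = 784, B_X = 32, B_Y = 4, imgsPerThread = 4, filtersPerThread = 16:
 *   blocksPerModule = 64 / (4 * 16) = 1
 *   gridDim.x = DIVUP(128, 32 * 4) = 1,  gridDim.y = 784 * 1 = 784
 * so each block computes a (64 filters) x (128 images) tile of one module of targets.
 */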
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs,
const bool conv) {
__shared__ float shFilters[pixelCache*numColors][B_Y * filtersPerThread]; // pre-load pixelCache pixels from B_Y*filtersPerThread filters
__shared__ float shImages[pixelCache*numColors][B_X * imgsPerThread]; // pre-load pixelCache pixels from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = blockIdx.y % blocksPerModule;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int numModules = numModulesY * numModulesX;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += filtersPerThread * B_Y * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
//float* shImgLoad = &shImages[0][threadIdx.x];
for (int p = 0; p < filterPixels; p += pixelCache) {
/*
* Load pixelCache pixels from B_Y*filtersPerThread filters
* This condition covers the case when B_X is not divisible by filtersPerThread.
* In this case, not all of the threads will participate in the loading operation.
* This ensures that in each loop iteration, an integer number of rows of shFilters
* are filled, which makes indexing simple.
*/
if (B_X % filtersPerThread == 0 || shFilterLoadY < B_X/filtersPerThread) {
#pragma unroll
for (int p2 = 0; p2 < pixelCache; p2 += B_X/filtersPerThread) {
const bool omit = pixelCache % (B_X / filtersPerThread) == 0;
const int preloadPx = shFilterLoadY + p2;
if (omit || preloadPx < pixelCache) {
if (p + preloadPx < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = 0;
}
}
}
}
}
/*
* Load pixelCache pixels from B_X*imgsPerThread images.
*/
#pragma unroll
for (int ly = 0; ly < pixelCache; ly += B_Y) {
const int preloadPx = ly + threadIdx.y;
const int pixIdx = p + preloadPx;
const bool omit = pixelCache % B_Y == 0; // Compile-time condition
/*
* Don't load any image pixels corresponding to filter pixels that don't exist.
*/
if (pixIdx < filterPixels && (omit || preloadPx < pixelCache)) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + pixIdx / filterSize;
if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (y * imgSizeX + x)];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = m[c * imgStride * imgPixels + i * B_X];
} else {
shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0;
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < pixelCache*numColors; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] += shImages[i][g + threadIdx.x * imgsPerThread] * shFilters[i][threadIdx.y * filtersPerThread + f];
}
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
targets[g * B_X + f * numImages * numModules] = scaleTargets * targets[g * B_X + f * numImages * numModules] + scaleOutputs * prod[f][g];
}
}
}
} else {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * numImages * numModules] = scaleOutputs * prod[f][g];
}
}
}
}
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of B_Y * filtersPerThread
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
* colorCache: how many colors to put into shmem
*
* numFilters should be divisible by B_Y * filtersPerThread
 * numImages should be divisible by B_X * imgsPerThread
* numFilterColors should be divisible by colorCache.
* numImgColors must be even.
* numFilters must be divisible by numGroups.
* no restrictions on pixelCache
* The imgSize here is the size of the actual image without the padding.
* As always, try to make B_X * imgsPerThread == B_Y * filtersPerThread for maximum efficiency.
*
*/
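/*
 * Shared-memory footprint, as a quick sanity check (not a constraint enforced in code):
 * shFilters holds colorCache * (B_Y * filtersPerThread) floats and shImages holds
 * colorCache * (B_X * imgsPerThread) floats. For the common < 4, 32, 4, 16, 4 >
 * instantiation below that is 4*64 + 4*128 = 768 floats, i.e. 3 KB per block.
 */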
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const bool conv) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
    filters += blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numFilterColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y) * numImages * numModules
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
const int imgStartX = MAX(0, imgLoadModPosX);
const int imgStartY = MAX(0, imgLoadModPosY);
const int imgEndX = MIN(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = MIN(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
const int filterPxX = imgX - imgLoadModPosX;
const int p = filterPxY * filterSize + filterPxX;
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
/*
* Load a pixel from B_Y*filtersPerThread filters
* This condition covers the case when B_X is not divisible by filtersPerThread.
* In this case, not all of the threads will participate in the loading operation.
* This ensures that in each loop iteration, an integer number of rows of shFilters
* are filled, which makes indexing simple.
* nvcc is behaving in a completely insane way: removing this condition under
* template parameters that guarantee it to be true actually slows down
* the computation.
*
*/
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) {
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
if (colorCache % (B_X/filtersPerThread) == 0 || c + shFilterLoadY < colorCache) {
shFilters[c + shFilterLoadY][shFilterLoadX] = filters[((oc+c) * filterPixels + p) * numFilters];
}
}
}
/*
* Load a pixel from B_X*imgsPerThread images.
*/
const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
float* m = &images[imgStride * (oc * imgPixels + pixIdx)];
#pragma unroll
for (int c = 0; c < colorCache; c += B_Y) {
if (colorCache % B_Y == 0 || threadIdx.y + c < colorCache) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
shImages[c + threadIdx.y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
} else {
shImages[c + threadIdx.y][threadIdx.x + i * B_X] = 0;
}
}
}
}
__syncthreads();
for (int c = 0; c < colorCache; c++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[f][g] += shImages[c][g * B_X + threadIdx.x] * shFilters[c][threadIdx.y + f * B_Y];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModules, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
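/*
 * Usage sketch with hypothetical dimensions (consistent with the asserts below):
 * 3-color 32x32 images (so images is 3072 x numImages), 64 filters of size 5x5
 * (so filters is 75 x 64), stride 1, no padding, hence numModulesY = numModulesX = 28:
 *
 *   _filterActs(images, filters, targets,
 *               32, 28, 28, 0, 1,   // imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride
 *               3, 1,               // numImgColors, numGroups
 *               0, 1, true);        // scaleTargets, scaleOutput, conv
 *
 * With scaleTargets == 0 the routine resizes targets to (numFilters * numModules, numImages) itself.
 */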
void _filterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numFilters = filters.getNumCols();
int numModules = numModulesY * numModulesX;
int numImages = images.getNumCols();
int imgPixels = images.getNumRows()/numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int filterModuleMult = conv ? 1 : numModules;
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(numFilters % (16 * numGroups) == 0);
assert(numImgColors % numGroups == 0);
//images.printShape("images");
//printf("rows: %d, pixels: %d, colors: %d\n", images.getNumRows(), imgPixels, numImgColors);
//images.printShape("images");
assert(images.getNumRows() == imgPixels * numImgColors);
assert(imgSizeY * imgSizeX == imgPixels);
int numFiltersPerGroup = numFilters / numGroups;
int imgStride = images.getStride(); // images does not need to be a contiguous matrix
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = int(sqrt(filterPixels));
assert(filterSize * filterSize == filterPixels);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(!images.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
assert(filters.isContiguous());
assert(targets.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int filtersPerThread, threadsY = 4;
if (numImgColors <= 3) {
// Special kernels written for colors = 3, filters = 64 and colors = 3, filters = 48 cases.
// The remaining cases use the old routines.
// TODO: Modernize the remaining cases if you care about them.
filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 48 == 0 ? 12 : numFiltersPerGroup % 32 == 0 ? 8 : 4;
} else {
filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 32 == 0 ? 8 : 4;
threadsY = numFiltersPerGroup % 128 == 0 && numFilterColors % 8 == 0 && imgsPerThread != 4 ? 8 : 4;
}
int threadsX = 32;
dim3 threads(threadsX, threadsY);
dim3 blocks = dim3(DIVUP(numImages, threads.x * imgsPerThread), (numModules * numFilters) / (threads.y * filtersPerThread));
bool checkImgBounds = numImages % (threads.x*imgsPerThread) != 0;
bool scale = scaleTargets != 0;
if (scaleTargets == 0) {
targets.resize(numFilters * numModules, numImages);
} else {
assert(targets.getNumRows() == numFilters * numModules);
assert(targets.getNumCols() == numImages);
}
hipStream_t stream = NVMatrix::getDefaultStream();
// Auto-generated calling code...
// NOTE: The calling code is set up such that if checkImgBounds is true, then imgsPerThread = 1.
// In principle it doesn't have to be this way, and you may want to optimize for that case.
if (scale == false) {
if (checkImgBounds == false) {
if (numFilterColors % 8 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 3) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
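        // checkImgBounds == true: dispatch the one-image-per-thread kernel variants with bounds
        // checking enabled (last template argument true), so partial image batches are handled safely.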
else if (checkImgBounds == true) {
if (numFilterColors % 8 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 3) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
}
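    // scale == true: same selection structure as above, but the kernels are instantiated with the
    // 'scale' template parameter set to true, so scaleTargets/scaleOutput are applied to the output.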
else if (scale == true) {
if (checkImgBounds == false) {
if (numFilterColors % 8 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
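                        // Prefer the texture-object preload kernel when the image buffer fits under the
                        // texture size limit; otherwise fall back to the plain global-memory version.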
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
hipFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferL1);
hipLaunchKernelGGL(( filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
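            // 3-color filters (e.g. RGB input): for 128-image batches the 64- and 48-filters-per-group
            // cases use preload-specialized kernels; smaller filter counts use the generic color kernel.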
else if (numFilterColors == 3) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, true, false >, hipFuncCachePreferShared);
                    hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, true, false >, hipFuncCachePreferShared);
                    hipLaunchKernelGGL(( filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
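        // Bounds-checked path for the scaling case: same selection logic, using the
        // checkImgBounds = true kernel variants.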
else if (checkImgBounds == true) {
if (numFilterColors % 8 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 3) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
hipFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true >) , dim3(blocks), dim3(threads), 0, stream, images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
}
getLastCudaError("filterActs: kernel execution failed");
}
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups) {
convFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1);
}
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_filterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups) {
localFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1);
}
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_filterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
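// Usage sketch (added comment; the variable names are illustrative assumptions, not from the
// original source):
//   NVMatrix images, filters, targets;
//   // images: (numImgColors * imgPixels) x numImages, filters: (numFilterColors * filterPixels) x numFilters
//   convFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX,
//                  paddingStart, moduleStride, numImgColors, numGroups);
//   // equivalent to calling the scaled overload with scaleTargets = 0 and scaleOutput = 1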
|
5c1cc38082a5eeaf0f98a9f7428ec8bdb92da1ad.cu
|
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../nvmatrix/include/nvmatrix.cuh"
#include "../include/cudaconv2.cuh"
__device__ __forceinline__ void filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(int fPidx, int imgLoadModPosY, int imgLoadModPosX,
int imgSizeX, int filterSize, int& iPidx) {
int x = imgLoadModPosX + (fPidx) % filterSize;
int y = imgLoadModPosY + (fPidx) / filterSize;
iPidx = y >= 0 && y < imgSizeX && x >= 0 && x < imgSizeX ? y * imgSizeX + x : -1;
}
#define FA_COLOR3_IMPRELOAD(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : mm[c * imgPixels * imgStride + i * B_X];
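/*
 * Added explanatory comment (not in the original source): the helper above maps a filter-pixel
 * index to an image-pixel index, returning -1 when the padded position falls outside the image.
 * FA_COLOR3_IMPRELOAD(c,i) then refills imPreload[c][i] for color c and image slot i from the
 * pointer mm, substituting 0 for padding pixels (iPidxNext < 0) or for image columns beyond
 * numImages when checkImgBounds is set.
 */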
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numModules = numModulesX * numModulesY;
    // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) { // NOTE: UNTESTED!
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
int iPidxNext;
float imPreload[numColors][imgsPerThread];
float fPreload[numColors][pixelCache*filtersPerThread/B_X];
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int p = 0; p < pixelCache; p += B_X/filtersPerThread) {
if (p + shFilterLoadY < filterPixels) {
fPreload[c][p*filtersPerThread/B_X] = filters[p * numFilters + c * numFilters * filterPixels];
} else{
fPreload[c][p*filtersPerThread/B_X] = 0;
}
}
}
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) {
imPreload[c][i] = images[(c * imgPixels + iPidxNext) * imgStride + i * B_X];
} else {
imPreload[c][i] = 0;
}
}
}
for (int p = 0; p < filterPixels; p += pixelCache) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i];
}
}
const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache;
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
const float* ff = &filters[numFilters * fPidxNext];
const float* mm = &images[imgStride * iPidxNext];
FA_COLOR3_IMPRELOAD(1,0);
FA_COLOR3_IMPRELOAD(1,1);
FA_COLOR3_IMPRELOAD(1,2);
FA_COLOR3_IMPRELOAD(1,3);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int pp = 0; pp < pixelCache; pp += B_X/filtersPerThread) {
shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp*filtersPerThread/B_X];
}
}
__syncthreads();
FA_COLOR3_IMPRELOAD(0,0);
FA_COLOR3_IMPRELOAD(0,1);
FA_COLOR3_IMPRELOAD(0,2);
FA_COLOR3_IMPRELOAD(0,3);
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f];
}
}
}
}
FA_COLOR3_IMPRELOAD(2,0);
FA_COLOR3_IMPRELOAD(2,1);
FA_COLOR3_IMPRELOAD(2,2);
FA_COLOR3_IMPRELOAD(2,3);
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int pp = 2; pp < pixelCache; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f];
}
}
}
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
fPreload[c][pp] = fPidxNext + pp*(B_X/filtersPerThread) + shFilterLoadY >= filterPixels ? 0 : ff[c * numFilters* filterPixels + pp*(B_X/filtersPerThread) * numFilters];
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* This won't be pretty.
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numModules = numModulesX * numModulesY;
    // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int warp = tidx / 32;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) { // NOTE: UNTESTED!
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
int iPidxNext;
float imPreload[numColors][imgsPerThread];
float fPreload[numColors][DIVUP(pixelCache*filtersPerThread,B_X)];
if (warp < 3) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int p = 0; p < pixelCache; p += 2) {
if (p + shFilterLoadY < filterPixels) {
fPreload[c][p/2] = filters[p * numFilters + c * numFilters * filterPixels];
} else {
fPreload[c][p/2] = 0;
}
}
}
}
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) {
imPreload[c][i] = images[(c * imgPixels + iPidxNext) * imgStride + i * B_X];
} else {
imPreload[c][i] = 0;
}
}
}
for (int p = 0; p < filterPixels; p += pixelCache) {
const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache;
filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext);
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i];
}
}
if (warp < 3) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
#pragma unroll
for (int pp = 0; pp < pixelCache; pp += 2) {
shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp/2];
}
}
}
__syncthreads();
const float* ff = &filters[numFilters * fPidxNext];
const float* mm = &images[imgStride * iPidxNext];
FA_COLOR3_IMPRELOAD(2,2);
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][pp][tx * imgsPerThread + i] * shFilters[0][pp][ty * filtersPerThread + f];
}
}
}
FA_COLOR3_IMPRELOAD(0,0);
FA_COLOR3_IMPRELOAD(2,0);
FA_COLOR3_IMPRELOAD(1,0);
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][pp][tx * imgsPerThread + i] * shFilters[1][pp][ty * filtersPerThread + f];
}
}
}
FA_COLOR3_IMPRELOAD(0,3);
FA_COLOR3_IMPRELOAD(1,3);
FA_COLOR3_IMPRELOAD(2,3);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][0][tx * imgsPerThread + i] * shFilters[2][0][ty * filtersPerThread + f];
}
}
FA_COLOR3_IMPRELOAD(1,2);
FA_COLOR3_IMPRELOAD(0,2);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][1][tx * imgsPerThread + i] * shFilters[2][1][ty * filtersPerThread + f];
}
}
FA_COLOR3_IMPRELOAD(1,1);
FA_COLOR3_IMPRELOAD(0,1);
FA_COLOR3_IMPRELOAD(2,1);
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int pp = 2; pp < pixelCache; pp++) {
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f];
}
}
}
#pragma unroll
for (int pp = 0; pp < 2; pp++) {
fPreload[c][pp] = warp >= 3 || fPidxNext + pp*2 + shFilterLoadY >= filterPixels ? 0 : ff[c * numFilters* filterPixels + pp*2 * numFilters];
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops costs 2 registers, but saves time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
__device__ inline void filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(int filterSize, int imgSizeX, int imgLoadModPosY, int imgLoadModPosX, int imgY, int imgX, int& fPidx, int& iPidx) {
int filterPxY = imgY - imgLoadModPosY;
int filterPxX = imgX - imgLoadModPosX;
fPidx = filterPxY * filterSize + filterPxX;
iPidx = imgY * imgSizeX + imgX; // Pixel index in img
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
 * Note: in git there's a 1.5% faster version of this which uses 167 registers instead of 154...
* it's basically the same thing, but it doesn't do the next-pixel computation. It just avoids
* pre-loading when it rolls over to the next pixel.
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
    // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
    filters += blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numFilterColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
// float fCache[filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
// NOTE: these max/min functions increase register usage as compared to my macros
const int imgStartX = max(0, imgLoadModPosX);
const int imgStartY = max(0, imgLoadModPosY);
const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
int fPidx, iPidx;
float imPreload[imgsPerThread];
float fPreload[colorCache*filtersPerThread/B_X];
// float fCache[filtersPerThread];
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
imPreload[i] = images[imgStride * iPidx + i * B_X];
} else {
imPreload[i] = 0;
}
}
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage..
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
fPreload[c*filtersPerThread/B_X] = filters[(c * filterPixels + fPidx) * numFilters];
}
}
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
// const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
// const int filterPxX = imgX - imgLoadModPosX;
// const int p = filterPxY * filterSize + filterPxX;
// const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
// setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx);
// float* m = &images[imgStride * pixIdx];
const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1;
int imgYNext = imgY;
int imgXNext = imgX;
int fPidxNext, iPidxNext;
if (!lastPixel) {
imgYNext = imgY + (imgX + 1 == imgEndX);
imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1;
}
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext);
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)];
const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)];
if (oc == numFilterColors - colorCache) {
ff = &filters[fPidxNext * numFilters];
mm = &images[iPidxNext * imgStride];
fPidx = fPidxNext;
iPidx = iPidxNext;
}
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X];
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[ty][tx * imgsPerThread + i] = imPreload[i];
}
imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : mm[0 * B_X];
imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : mm[1 * B_X];
imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : mm[2 * B_X];
__syncthreads();
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f];
}
}
fPreload[0] = ff[0];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f];
}
}
fPreload[1] = ff[(B_X/filtersPerThread * filterPixels) * numFilters];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f];
}
}
imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : mm[3 * B_X];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f];
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
*/
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex(cudaTextureObject_t images, cudaTextureObject_t filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const bool conv/*, const bool noloads*/) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
    // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is
// in the range 0..31. It appears that this allows the compiler to optimize?
const int tx = threadIdx.x % B_X;
const int ty = threadIdx.y % B_Y;
const int tidx = ty * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
// images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
const int filterOffset = blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 0 : moduleIdx * numFilterColors * filterPixels * numFilters);
// filters +=blockFilterIdx
// + shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
// if (!conv) {
// filters += moduleIdx * numFilterColors * filterPixels * numFilters;
// }
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules
+ myImgIdx;
float prod[imgsPerThread][filtersPerThread];
// float fCache[filtersPerThread];
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] = 0;
}
}
// NOTE: these max/min functions increase register usage as compared to my macros
const int imgStartX = max(0, imgLoadModPosX);
const int imgStartY = max(0, imgLoadModPosY);
const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
int fPidx, iPidx;
float imPreload[imgsPerThread]; // [4]
float fPreload[colorCache*filtersPerThread/B_X]; // [2]
// float fCache[filtersPerThread];
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X);
} else {
imPreload[i] = 0;
}
}
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage..
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters);
}
}
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
// const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
// const int filterPxX = imgX - imgLoadModPosX;
// const int p = filterPxY * filterSize + filterPxX;
// const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
// setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx);
// float* m = &images[imgStride * pixIdx];
const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1;
int imgYNext = imgY;
int imgXNext = imgX;
int fPidxNext, iPidxNext;
if (!lastPixel) {
imgYNext = imgY + (imgX + 1 == imgEndX);
imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1;
}
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext);
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
// const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)];
// const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)];
int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx);
int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx);
if (oc == numFilterColors - colorCache) {
filterOffset2 = filterOffset + fPidxNext * numFilters;
imgOffset2 = imgOffset + iPidxNext * imgStride;
fPidx = fPidxNext;
iPidx = iPidxNext;
}
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X];
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
shImages[ty][tx * imgsPerThread + i] = imPreload[i];
}
imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X);
imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X);
imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X);
__syncthreads();
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f];
}
}
fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f];
}
}
fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f];
}
}
imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X);
#pragma unroll
for(int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f];
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f];
}
}
}
}
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of module and B_Y * filtersPerThread
*
* images: (numColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
*
* Number of filters per module should be divisible by B_Y * filtersPerThread
* checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread
*
* The imgSize here is the size of the actual image without the padding.
*
*/
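/*
 * Worked example (added for illustration; the concrete numbers are assumptions, not from the
 * original source): with B_Y = 4, B_X = 32, imgsPerThread = 4, filtersPerThread = 4, each block
 * produces outputs for 32*4 = 128 images and 4*4 = 16 filters at one module, and the host
 * launches gridDim.x = ceil(numImages / 128), gridDim.y = numModules * (numFilters / 16),
 * matching the block computation in _filterActs below.
 */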
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, int pixelCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs,
const bool conv) {
__shared__ float shFilters[pixelCache*numColors][B_Y * filtersPerThread]; // pre-load pixelCache pixels from B_Y*filtersPerThread filters
__shared__ float shImages[pixelCache*numColors][B_X * imgsPerThread]; // pre-load pixelCache pixels from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = blockIdx.y % blocksPerModule;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int numModules = numModulesY * numModulesX;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += filtersPerThread * B_Y * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
//float* shImgLoad = &shImages[0][threadIdx.x];
for (int p = 0; p < filterPixels; p += pixelCache) {
/*
* Load pixelCache pixels from B_Y*filtersPerThread filters
* This condition covers the case when B_X is not divisible by filtersPerThread.
* In this case, not all of the threads will participate in the loading operation.
* This ensures that in each loop iteration, an integer number of rows of shFilters
* are filled, which makes indexing simple.
*/
if (B_X % filtersPerThread == 0 || shFilterLoadY < B_X/filtersPerThread) {
#pragma unroll
for (int p2 = 0; p2 < pixelCache; p2 += B_X/filtersPerThread) {
const bool omit = pixelCache % (B_X / filtersPerThread) == 0;
const int preloadPx = shFilterLoadY + p2;
if (omit || preloadPx < pixelCache) {
if (p + preloadPx < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = 0;
}
}
}
}
}
/*
* Load pixelCache pixels from B_X*imgsPerThread images.
*/
#pragma unroll
for (int ly = 0; ly < pixelCache; ly += B_Y) {
const int preloadPx = ly + threadIdx.y;
const int pixIdx = p + preloadPx;
const bool omit = pixelCache % B_Y == 0; // Compile-time condition
/*
* Don't load any image pixels corresponding to filter pixels that don't exist.
*/
if (pixIdx < filterPixels && (omit || preloadPx < pixelCache)) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + pixIdx / filterSize;
if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (y * imgSizeX + x)];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = m[c * imgStride * imgPixels + i * B_X];
} else {
shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0;
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < pixelCache*numColors; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] += shImages[i][g + threadIdx.x * imgsPerThread] * shFilters[i][threadIdx.y * filtersPerThread + f];
}
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
targets[g * B_X + f * numImages * numModules] = scaleTargets * targets[g * B_X + f * numImages * numModules] + scaleOutputs * prod[f][g];
}
}
}
} else {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * numImages * numModules] = scaleOutputs * prod[f][g];
}
}
}
}
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of B_Y * filtersPerThread
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModulesY, numModulesX, numImages)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
* colorCache: how many colors to put into shmem
*
* numFilters should be divisible by B_Y * filtersPerThread
 * numImages should be divisible by B_X * imgsPerThread
* numFilterColors should be divisible by colorCache.
* numImgColors must be even.
* numFilters must be divisible by numGroups.
* no restrictions on pixelCache
* The imgSize here is the size of the actual image without the padding.
* As always, try to make B_X * imgsPerThread == B_Y * filtersPerThread for maximum efficiency.
*
*/
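/*
 * Worked example (added for illustration; the concrete numbers are assumptions, not from the
 * original source): with B_Y = 4, B_X = 32, imgsPerThread = 4, filtersPerThread = 8,
 * colorCache = 8, one block computes a 128-image x 32-filter tile for a single module,
 * iterating over the filter colors 8 at a time; per iteration shFilters holds 8 x 32 floats
 * and shImages holds 8 x 128 floats.
 */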
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse2(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const bool conv) {
__shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
__shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
    filters += blockFilterIdx
+ shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numFilterColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y) * numImages * numModules
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
const int imgStartX = MAX(0, imgLoadModPosX);
const int imgStartY = MAX(0, imgLoadModPosY);
const int imgEndX = MIN(imgLoadModPosX + filterSize, imgSizeX);
const int imgEndY = MIN(imgLoadModPosY + filterSize, imgSizeY);
// __shared__ int imgPos[]
for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
const int filterPxY = imgY - imgLoadModPosY;
for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
const int filterPxX = imgX - imgLoadModPosX;
const int p = filterPxY * filterSize + filterPxX;
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
/*
* Load a pixel from B_Y*filtersPerThread filters
* This condition covers the case when B_X is not divisible by filtersPerThread.
* In this case, not all of the threads will participate in the loading operation.
* This ensures that in each loop iteration, an integer number of rows of shFilters
* are filled, which makes indexing simple.
* nvcc is behaving in a completely insane way: removing this condition under
* template parameters that guarantee it to be true actually slows down
* the computation.
*
*/
if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) {
#pragma unroll
for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
if (colorCache % (B_X/filtersPerThread) == 0 || c + shFilterLoadY < colorCache) {
shFilters[c + shFilterLoadY][shFilterLoadX] = filters[((oc+c) * filterPixels + p) * numFilters];
}
}
}
/*
* Load a pixel from B_X*imgsPerThread images.
*/
const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
float* m = &images[imgStride * (oc * imgPixels + pixIdx)];
#pragma unroll
for (int c = 0; c < colorCache; c += B_Y) {
if (colorCache % B_Y == 0 || threadIdx.y + c < colorCache) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
shImages[c + threadIdx.y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
} else {
shImages[c + threadIdx.y][threadIdx.x + i * B_X] = 0;
}
}
}
}
__syncthreads();
for (int c = 0; c < colorCache; c++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
prod[f][g] += shImages[c][g * B_X + threadIdx.x] * shFilters[c][threadIdx.y + f * B_Y];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
}
}
}
} else {
// Note: reversing order of these loops saves 2 registers, but costs time
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModules, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
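/*
 * Illustrative sizing note (added comment; the concrete layer shape is an assumption, not from
 * the original source): for numImgColors = 3, imgSizeY = imgSizeX = 32, filterSize = 5,
 * paddingStart = -2, moduleStride = 1 and numFilters = 64, a caller would pass
 * numModulesY = numModulesX = 32, and targets is resized below to (64 * 1024) rows by
 * numImages columns when scaleTargets == 0.
 */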
void _filterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numFilters = filters.getNumCols();
int numModules = numModulesY * numModulesX;
int numImages = images.getNumCols();
int imgPixels = images.getNumRows()/numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int filterModuleMult = conv ? 1 : numModules;
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(numFilters % (16 * numGroups) == 0);
assert(numImgColors % numGroups == 0);
//images.printShape("images");
//printf("rows: %d, pixels: %d, colors: %d\n", images.getNumRows(), imgPixels, numImgColors);
//images.printShape("images");
assert(images.getNumRows() == imgPixels * numImgColors);
assert(imgSizeY * imgSizeX == imgPixels);
int numFiltersPerGroup = numFilters / numGroups;
int imgStride = images.getStride(); // images does not need to be a contiguous matrix
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = int(sqrt(filterPixels));
assert(filterSize * filterSize == filterPixels);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(!images.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
assert(filters.isContiguous());
assert(targets.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int filtersPerThread, threadsY = 4;
if (numImgColors <= 3) {
// Special kernels written for colors = 3, filters = 64 and colors = 3, filters = 48 cases.
// The remaining cases use the old routines.
// TODO: Modernize the remaining cases if you care about them.
filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 48 == 0 ? 12 : numFiltersPerGroup % 32 == 0 ? 8 : 4;
} else {
filtersPerThread = numFiltersPerGroup % 64 == 0 ? 16 : numFiltersPerGroup % 32 == 0 ? 8 : 4;
threadsY = numFiltersPerGroup % 128 == 0 && numFilterColors % 8 == 0 && imgsPerThread != 4 ? 8 : 4;
}
int threadsX = 32;
dim3 threads(threadsX, threadsY);
dim3 blocks = dim3(DIVUP(numImages, threads.x * imgsPerThread), (numModules * numFilters) / (threads.y * filtersPerThread));
bool checkImgBounds = numImages % (threads.x*imgsPerThread) != 0;
bool scale = scaleTargets != 0;
if (scaleTargets == 0) {
targets.resize(numFilters * numModules, numImages);
} else {
assert(targets.getNumRows() == numFilters * numModules);
assert(targets.getNumCols() == numImages);
}
cudaStream_t stream = NVMatrix::getDefaultStream();
// Auto-generated calling code...
// NOTE: The calling code is set up such that if checkImgBounds is true, then imgsPerThread = 1.
// In principle it doesn't have to be this way, and you may want to optimize for that case.
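    // Added explanatory comment (not in the original source): the generated branches below pick
    // a template instantiation from (numFilterColors, numImages, numFiltersPerGroup, checkImgBounds).
    // For instance, numFilterColors % 8 == 0 with numImages % 128 == 0 and numFiltersPerGroup % 128 == 0
    // selects the texture-object preload kernel when the image buffer is smaller than
    // TEXTURE_SIZE_MAX, and the plain pointer-based preload kernel otherwise.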
if (scale == false) {
if (checkImgBounds == false) {
if (numFilterColors % 8 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 3) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 16, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 12, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 16, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 12, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 16, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 12, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 16, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 12, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 16, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 12, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
else if (checkImgBounds == true) {
if (numFilterColors % 8 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 3) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 3, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 3, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 2, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 2, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 1, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 1, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, 4, false, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
}
else if (scale == true) {
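        // Mirror of the scale == false dispatch above: the same kernel and template selection logic,
        // but with the scale template parameter set to true so the kernels can accumulate into the
        // existing contents of targets using scaleTargets and scaleOutput.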
if (checkImgBounds == false) {
if (numFilterColors % 8 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if (images.getNumDataBytes() < TEXTURE_SIZE_MAX) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferL1);
filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4 < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 8, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 4, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 8, 32, 2, 16, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 16, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 8, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 4, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 8, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 4, 4, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 8, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 2, 4, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 3) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3 < 4, 32, 4, 16, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3 < 4, 32, 4, 12, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(),numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 16, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 12, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 16, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 12, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 16, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 12, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 128 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 16, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 12, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 64 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 16, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 12, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
else if (numImages % 32 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, false > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
else if (checkImgBounds == true) {
if (numFilterColors % 8 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 8, 32, 1, 16, 8, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 8, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 8, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 8, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors % 4 == 0) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 16, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 8, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse2 < 4, 32, 1, 4, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 3) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 3, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 3, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 2) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 2, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 2, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
else if (numFilterColors == 1) {
if (numImages % 1 == 0) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 16, 1, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 12, 1, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
else if (numFiltersPerGroup % 1 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, 4, true, true > <<<blocks, threads, 0, stream>>>(images.getDevData(), filters.getDevData(), targets.getDevData(), numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
}
getLastCudaError("filterActs: kernel execution failed");
}
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups) {
convFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1);
}
void convFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_filterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups) {
localFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1);
}
void localFilterActs(NVMatrix& images, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_filterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
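// Usage sketch (illustrative only; argument order follows the wrappers above):
//   convFilterActs(images, filters, targets, imgSizeY, numModulesY, numModulesX,
//                  paddingStart, moduleStride, numImgColors, numGroups);
//   localFilterActs(...) takes the same arguments but dispatches with conv == false,
//   i.e. each output module uses its own (untied) filter bank.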
|
068eff4dd763212e492a5666b0e046fd3702a010.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <thrust/device_vector.h>
#include <thrust/random.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/shuffle.h>
namespace cudf {
namespace detail {
std::unique_ptr<table> sample(table_view const& input,
size_type const n,
sample_with_replacement replacement,
int64_t const seed,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(n >= 0, "expected number of samples should be non-negative");
auto const num_rows = input.num_rows();
if ((n > num_rows) and (replacement == sample_with_replacement::FALSE)) {
CUDF_FAIL("If n > number of rows, then multiple sampling of the same row should be allowed");
}
if (n == 0) return cudf::empty_like(input);
if (replacement == sample_with_replacement::TRUE) {
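    // Sampling with replacement: each output row i draws an independent uniform row
    // index. rng.discard(i) gives every i its own slice of the engine's sequence, so
    // the transform iterator below can be evaluated in parallel without shared state.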
auto RandomGen = [seed, num_rows] __device__(auto i) {
thrust::default_random_engine rng(seed);
      thrust::uniform_int_distribution<size_type> dist{0, num_rows - 1};  // closed interval: both bounds are inclusive
rng.discard(i);
return dist(rng);
};
auto begin =
thrust::make_transform_iterator(thrust::counting_iterator<size_type>(0), RandomGen);
auto end = thrust::make_transform_iterator(thrust::counting_iterator<size_type>(n), RandomGen);
return detail::gather(input, begin, end, false, mr, stream);
} else {
auto gather_map =
make_numeric_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED, stream);
auto gather_map_mutable_view = gather_map->mutable_view();
// Shuffle all the row indices
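    // thrust::shuffle_copy writes a random permutation of [0, num_rows) into the
    // gather map; only the first n entries are gathered, so no row index repeats.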
thrust::shuffle_copy(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<size_type>(0),
thrust::counting_iterator<size_type>(num_rows),
gather_map_mutable_view.begin<size_type>(),
thrust::default_random_engine(seed));
auto gather_map_view =
(n == num_rows) ? gather_map->view() : cudf::slice(gather_map->view(), {0, n})[0];
return detail::gather(input,
gather_map_view.begin<size_type>(),
gather_map_view.end<size_type>(),
false,
mr,
stream);
}
}
} // namespace detail
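// Public entry point. A minimal illustrative call (caller-side sketch; `tbl` and `mr`
// stand for the caller's table_view and device_memory_resource, they are not defined here):
//   auto sampled = cudf::sample(tbl, 100, cudf::sample_with_replacement::FALSE, 42, mr);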
std::unique_ptr<table> sample(table_view const& input,
size_type const n,
sample_with_replacement replacement,
int64_t const seed,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sample(input, n, replacement, seed, mr);
}
} // namespace cudf
|
068eff4dd763212e492a5666b0e046fd3702a010.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <thrust/device_vector.h>
#include <thrust/random.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/shuffle.h>
namespace cudf {
namespace detail {
std::unique_ptr<table> sample(table_view const& input,
size_type const n,
sample_with_replacement replacement,
int64_t const seed,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(n >= 0, "expected number of samples should be non-negative");
auto const num_rows = input.num_rows();
if ((n > num_rows) and (replacement == sample_with_replacement::FALSE)) {
CUDF_FAIL("If n > number of rows, then multiple sampling of the same row should be allowed");
}
if (n == 0) return cudf::empty_like(input);
if (replacement == sample_with_replacement::TRUE) {
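    // Sampling with replacement: each output row i draws an independent uniform row
    // index. rng.discard(i) gives every i its own slice of the engine's sequence, so
    // the transform iterator below can be evaluated in parallel without shared state.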
auto RandomGen = [seed, num_rows] __device__(auto i) {
thrust::default_random_engine rng(seed);
      thrust::uniform_int_distribution<size_type> dist{0, num_rows - 1};  // closed interval: both bounds are inclusive
rng.discard(i);
return dist(rng);
};
auto begin =
thrust::make_transform_iterator(thrust::counting_iterator<size_type>(0), RandomGen);
auto end = thrust::make_transform_iterator(thrust::counting_iterator<size_type>(n), RandomGen);
return detail::gather(input, begin, end, false, mr, stream);
} else {
auto gather_map =
make_numeric_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED, stream);
auto gather_map_mutable_view = gather_map->mutable_view();
// Shuffle all the row indices
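    // thrust::shuffle_copy writes a random permutation of [0, num_rows) into the
    // gather map; only the first n entries are gathered, so no row index repeats.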
thrust::shuffle_copy(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<size_type>(0),
thrust::counting_iterator<size_type>(num_rows),
gather_map_mutable_view.begin<size_type>(),
thrust::default_random_engine(seed));
auto gather_map_view =
(n == num_rows) ? gather_map->view() : cudf::slice(gather_map->view(), {0, n})[0];
return detail::gather(input,
gather_map_view.begin<size_type>(),
gather_map_view.end<size_type>(),
false,
mr,
stream);
}
}
} // namespace detail
std::unique_ptr<table> sample(table_view const& input,
size_type const n,
sample_with_replacement replacement,
int64_t const seed,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sample(input, n, replacement, seed, mr);
}
} // namespace cudf
|
9d1760e02298ab344be2c283748efa415817b46e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "UtilityGPU.h"
#include <hip/hip_runtime.h>
#include <iostream>
using namespace cv;
using namespace std;
int divUp(int x, int y)
{
return (x + y - 1)/y; //integer ceiling; rounding x/y to the nearest integer under-counts blocks when the remainder is small
}
void printMatrix2(Mat &M, std::string matrix)
{
printf("Matrix \"%s\" is %i x %i\n", matrix.c_str(), M.rows, M.cols);
for( int r=0;r<M.rows;++r)
{
for( int c=0;c<M.cols;++c)
{
//printf("%.20lf ",M.at<double>(r,c));
printf("%.6lf ",M.at<float>(r,c));
}
cout << endl;
}
cout <<endl;
}
void printMatrix2(std::string matrix,Mat &M)
{
printf("Matrix \"%s\" is %i x %i\n", matrix.c_str(), M.rows, M.cols);
for( int r=0;r<M.rows;++r)
{
for( int c=0;c<M.cols;++c)
{
//printf("%.20lf ",M.at<double>(r,c));
printf("%.6lf ",M.at<float>(r,c));
}
cout <<endl;
}
cout <<endl;
}
//Device code
//foreGroundParm is floating value
//input: foreGroundParm: 0-255
//result: occupancyOutput: 0 - 1 (average)
__global__ void CalcVolumeCostAverage_hd_float(float* voxelCoordsParm, int voxelNum, float* foreGroundParm, int foreGroundNum, float* projectMatParm,float* occupancyOutput)
{
//float maxValue = foreGroundNum;
// int sizeOfOneUnitP = 12*sizeof(float);
const int voxelIdx = 512* blockIdx.x + threadIdx.x;
if(voxelIdx<voxelNum)
{
float* tempCoord = &voxelCoordsParm[voxelIdx*3];
int totalCnt=0;
float foreValue=0;
for(int i=0;i<foreGroundNum;++i)
{
float* tempForeground = &foreGroundParm[2073600*i]; //1920*1080 = 2073600
float* P = &projectMatParm[12*i];
//float* P = projectMatParm+12;//+1 [12*i];
//projection
float z = P[8]*tempCoord[0] + P[9]*tempCoord[1] + P[10]*tempCoord[2] + P[11] ; //bug fixed: z was previously declared as int, which truncated the projective depth
float x = (P[0]*tempCoord[0] + P[1]*tempCoord[1] + P[2]*tempCoord[2] + P[3] )/z;
float y = (P[4]*tempCoord[0] + P[5]*tempCoord[1] + P[6]*tempCoord[2] + P[7] )/z;
//boundary checking
if(!(x<1 ||x>=1919 || y<1 || y>=1079))
{
//bilinear interpolation
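//Bilinearly interpolate the foreground map at the non-integer projection (x,y):
//alpha is the fractional offset in y and beta in x; value_l/value_r are the two
//column interpolants that are blended along x to give finalValue.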
int x_floor = float(x);
int x_ceil = x_floor+1;
int y_floor = float(y);
int y_ceil = y_floor +1;
float value_bl = tempForeground[y_floor*1920 + x_floor];
float value_br = tempForeground[y_floor*1920+ x_ceil];
float value_tl = tempForeground[y_ceil*1920+ x_floor];
float value_tr = tempForeground[y_ceil*1920 + x_ceil];
float alpha = y - y_floor;
float value_l = (1-alpha) *value_bl + alpha * value_tl;
float value_r = (1-alpha) *value_br + alpha * value_tr;
float beta = x - x_floor;
float finalValue = (1-beta) *value_l + beta * value_r;
foreValue += finalValue;
totalCnt++;
}
}
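//Average the accumulated foreground response over the contributing views and rescale
//from [0,255] to [0,1]; voxels with at most one contributing view (or a negligible
//summed response) are zeroed out.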
//occupancyOutput[voxelIdx] =0.5;
if(foreValue<=1 || totalCnt<=1)
occupancyOutput[voxelIdx] = 0;
else
occupancyOutput[voxelIdx] = float(foreValue)/float(totalCnt*255);
}
}
//Device code
//foreGroundParm is floating value
//input: foreGroundParm: 0-255
//result: occupancyOutput: 0 - 1 (average)
__global__ void CalcVolumeCostAverage_hd_float_onlyPositiveCost(float* voxelCoordsParm, int voxelNum, float* foreGroundParm, int foreGroundNum, float* projectMatParm,float* occupancyOutput)
{
//float maxValue = foreGroundNum;
// int sizeOfOneUnitP = 12*sizeof(float);
const int voxelIdx = 512* blockIdx.x + threadIdx.x;
if(voxelIdx<voxelNum)
{
float* tempCoord = &voxelCoordsParm[voxelIdx*3];
int totalCnt=0;
float foreValue=0;
for(int i=0;i<foreGroundNum;++i)
{
float* tempForeground = &foreGroundParm[2073600*i]; //1920*1080 = 2073600
float* P = &projectMatParm[12*i];
//float* P = projectMatParm+12;//+1 [12*i];
//projection
float z = P[8]*tempCoord[0] + P[9]*tempCoord[1] + P[10]*tempCoord[2] + P[11] ; //bug fixed: z was previously declared as int, which truncated the projective depth
float x = (P[0]*tempCoord[0] + P[1]*tempCoord[1] + P[2]*tempCoord[2] + P[3] )/z;
float y = (P[4]*tempCoord[0] + P[5]*tempCoord[1] + P[6]*tempCoord[2] + P[7] )/z;
//boundary checking
if(!(x<1 ||x>=1919 || y<1 || y>=1079))
{
//bilinear interpolation
int x_floor = float(x);
int x_ceil = x_floor+1;
int y_floor = float(y);
int y_ceil = y_floor +1;
float value_bl = tempForeground[y_floor*1920 + x_floor];
float value_br = tempForeground[y_floor*1920+ x_ceil];
float value_tl = tempForeground[y_ceil*1920+ x_floor];
float value_tr = tempForeground[y_ceil*1920 + x_ceil];
float alpha = y - y_floor;
float value_l = (1-alpha) *value_bl + alpha * value_tl;
float value_r = (1-alpha) *value_br + alpha * value_tr;
float beta = x - x_floor;
float finalValue = (1-beta) *value_l + beta * value_r;
foreValue += finalValue;
if(finalValue>0)
totalCnt++;
}
}
//occupancyOutput[voxelIdx] =0.5;
if(foreValue<=1 || totalCnt<=1)
occupancyOutput[voxelIdx] = 0;
else
occupancyOutput[voxelIdx] = float(foreValue)/float(totalCnt*255);
}
}
//Device code
//foreGroundParm is floating value
//input: foreGroundParm: 0-255
//result: occupancyOutput: 0 - 1 (average)
__global__ void CalcVolumeCostAverage_vga_float(float* voxelCoordsParm, int voxelNum, float* foreGroundParm, int foreGroundNum, float* projectMatParm,float* occupancyOutput)
{
//float maxValue = foreGroundNum;
// int sizeOfOneUnitP = 12*sizeof(float);
const int voxelIdx = 512* blockIdx.x + threadIdx.x;
if(voxelIdx<voxelNum)
{
float* tempCoord = &voxelCoordsParm[voxelIdx*3];
int totalCnt=0;
float valueSum=0;
for(int i=0;i<foreGroundNum;++i)
{
float* tempForeground = &foreGroundParm[307200*i]; //640*480 = 307200
float* P = &projectMatParm[12*i];
//float* P = projectMatParm+12;//+1 [12*i];
//projection
float z = P[8]*tempCoord[0] + P[9]*tempCoord[1] + P[10]*tempCoord[2] + P[11] ; //bug fixed: z was previously declared as int, which truncated the projective depth
float x = (P[0]*tempCoord[0] + P[1]*tempCoord[1] + P[2]*tempCoord[2] + P[3] )/z;
float y = (P[4]*tempCoord[0] + P[5]*tempCoord[1] + P[6]*tempCoord[2] + P[7] )/z;
//boundary checking
if(!(x<1 ||x>=639 || y<1 || y>=479))
{
//bilinear interpolation
int x_floor = float(x);
int x_ceil = x_floor+1;
int y_floor = float(y);
int y_ceil = y_floor +1;
float value_bl = tempForeground[y_floor*640 + x_floor];
float value_br = tempForeground[y_floor*640 + x_ceil];
float value_tl = tempForeground[y_ceil*640 + x_floor];
float value_tr = tempForeground[y_ceil*640 + x_ceil];
float alpha = y - y_floor;
float value_l = (1-alpha) *value_bl + alpha * value_tl;
float value_r = (1-alpha) *value_br + alpha * value_tr;
float beta = x - x_floor;
float curValueFinal = (1-beta) *value_l + beta * value_r;
valueSum += curValueFinal;
//if(curValueFinal>0) //ignore non-related views...
totalCnt++;
}
}
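//Every in-bounds projection contributes to totalCnt here (the positive-only check is
//commented out), so views where the voxel projects onto background pull the average down.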
//occupancyOutput[voxelIdx] =0.5;
if(totalCnt<=1)
occupancyOutput[voxelIdx] = 0;
else
occupancyOutput[voxelIdx] = float(valueSum)/float(totalCnt*255);
}
}
int bFirstCall=true;
bool DetectionCostVolumeGeneration_float_GPU_average_vga(float* voxelCoords,int voxelNum, vector<Mat_<float> >& foregroundVect,vector<Mat_<float>>& projectMatVect,float* occupancyOutput)
{
int nDevices;
hipGetDeviceCount(&nDevices);
if(bFirstCall)
{
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
printf("## Selected GPU idx: %d\n", g_gpu_device_id);
bFirstCall =false;
}
hipSetDevice(g_gpu_device_id);
size_t free,total;
hipMemGetInfo(&free,&total);
//printf("## CUDA:: MemoryChecking ::free%f MB, total %f MB\n",free/1e6,total/1e6);
assert(foregroundVect.size() == projectMatVect.size());
float* foreGroundParmGPU =NULL; //foreground image data (or detection cost map)
hipMalloc((void**) &foreGroundParmGPU, 307200 * foregroundVect.size()*sizeof(float)); //640x480 = 307200
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 1 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
hipMemcpy(&foreGroundParmGPU[307200*i],foregroundVect[i].data,307200*sizeof(float),hipMemcpyHostToDevice);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 2 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
return false;
}
}
float* projectMatParmGPU=NULL;
int sizeOfOneUnitP = 12 *sizeof(float);
hipMalloc((void**) &projectMatParmGPU, sizeOfOneUnitP* foregroundVect.size());
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 3 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
hipMemcpy(&projectMatParmGPU[12*i],projectMatVect[i].data,sizeOfOneUnitP,hipMemcpyHostToDevice);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 4 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
return false;
}
}
////////////////////////////////////////////////////////////////////////////////
//// Voxel memory allocation
////////////////////////////////////////////////////////////////////////////////
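//Voxel coordinates are streamed through the GPU in batches of at most voxelSegNum so
//that the coordinate (12 B/voxel) and output (4 B/voxel) buffers fit in device memory.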
int voxelSegNum = 2e8; //process at most ~2e8 voxels per batch
int iterNum = voxelNum/float(voxelSegNum);
int voxelNuminFinalIter;
if(voxelNum%(voxelSegNum)>0)
{
iterNum++;
voxelNuminFinalIter = voxelNum%(voxelSegNum);
}
if(iterNum ==1)
voxelSegNum = voxelNum;
//printf("GPU interation Num %d\n",iterNum);
//allocate voxel Pos Memory
float* occupancyOutputGPU=NULL; //Contains the 3D volume cost map
hipMalloc((void**) &occupancyOutputGPU, voxelSegNum*sizeof(float)); //voxelSegNum * 4Byte
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 5 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
float* voxelCoordsGPU=NULL;
hipMalloc((void**) &voxelCoordsGPU, voxelSegNum*sizeof(float)*3); //voxelSegNum * 12Byte
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 7 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
hipMemGetInfo(&free,&total);
//printf("## CUDA:: after :: free%f MB, total %f MB\n",free/1e6,total/1e6);
for(int i=0;i<iterNum;++i)
{
hipMemset((void*) occupancyOutputGPU, 0, voxelSegNum*sizeof(float));
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 6 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
if(i == iterNum-1 && voxelNuminFinalIter>0) //Final iteration
{
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 8 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
hipMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelNuminFinalIter*sizeof(float)*3,hipMemcpyHostToDevice);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 9 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
dim3 block(512,1);
dim3 grid(divUp(voxelNuminFinalIter,block.x), 1);
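//One thread per voxel; blocks of 512 threads match the 512*blockIdx.x indexing used
//inside the kernels.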
if(grid.x>65535)
{
//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
grid.x = 65535; //clamp the grid dimension only; the device buffers are still needed for the kernel launch below
}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
hipLaunchKernelGGL(( CalcVolumeCostAverage_vga_float), dim3(grid), dim3(block), 0, 0, voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
//CalcVolumeCostAverage_vga_float_onlyPositiveCost<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
//CalcVolumeCostWithOrientation<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,(int)camCenters.size(),camCenterParmGPU,occupancyOutputGPU);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 10 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
//dst.download(resultImage);
//ImageSC2(resultImage);
hipDeviceSynchronize();
hipMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelNuminFinalIter*sizeof(float),hipMemcpyDeviceToHost);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 11 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
}
else
{
float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
hipMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelSegNum*sizeof(float)*3,hipMemcpyHostToDevice);
dim3 block(512,1);
dim3 grid(divUp(voxelSegNum,block.x), 1);
if(grid.x>65535)
{
//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
grid.x = 65535;
}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
hipLaunchKernelGGL(( CalcVolumeCostAverage_vga_float), dim3(grid), dim3(block), 0, 0, voxelCoordsGPU,voxelSegNum,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
hipGetLastError();
//dst.download(resultImage);
//ImageSC2(resultImage);
hipDeviceSynchronize();
hipMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelSegNum*sizeof(float),hipMemcpyDeviceToHost);
hipError_t error= hipGetLastError();
if(error != hipSuccess)
{
// something's gone wrong
// print out the CUDA error as a string
printf("CUDA Error: %s\n", hipGetErrorString(error));
}
}
//printf("CUDA Iteration %d/%d\n",i,iterNum);//
}
//delete[] voxelCoords;
hipFree(voxelCoordsGPU);
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
hipMemGetInfo(&free,&total);
//printf("## CUDA:: return :: free%f MB, total %f MB\n",free/1e6,total/1e6);
return true;
}
bool DetectionCostVolumeGeneration_float_GPU_average_hd(float* voxelCoords,int voxelNum, vector<Mat_<float> >& foregroundVect,vector<Mat_<float>>& projectMatVect,float* occupancyOutput,bool bOnlyPositiveValueAvg)
{
size_t free,total;
hipMemGetInfo(&free,&total);
//printf("## CUDA:: before ::a free%f MB, total %f MB\n",free/1e6,total/1e6);
assert(foregroundVect.size() == projectMatVect.size());
float* foreGroundParmGPU =NULL; //foreground image data (or detection cost map)
//hipMalloc((void**) &foreGroundParmGPU, 307200 * foregroundVect.size()*sizeof(float)); //640x480 = 307200
hipMalloc((void**) &foreGroundParmGPU, 2073600 * foregroundVect.size()*sizeof(float)); //1920x1080= 2073600
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 1 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
hipMemcpy(&foreGroundParmGPU[2073600*i],foregroundVect[i].data,2073600*sizeof(float),hipMemcpyHostToDevice);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 2 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
return false;
}
}
float* projectMatParmGPU=NULL;
int sizeOfOneUnitP = 12 *sizeof(float);
hipMalloc((void**) &projectMatParmGPU, sizeOfOneUnitP* foregroundVect.size());
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 3 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
hipMemcpy(&projectMatParmGPU[12*i],projectMatVect[i].data,sizeOfOneUnitP,hipMemcpyHostToDevice);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 4 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
return false;
}
}
////////////////////////////////////////////////////////////////////////////////
//// Voxel memory allocation
////////////////////////////////////////////////////////////////////////////////
int voxelSegNum = 2e8; //process at most ~2e8 voxels per batch
int iterNum = voxelNum/float(voxelSegNum);
int voxelNuminFinalIter;
if(voxelNum%(voxelSegNum)>0)
{
iterNum++;
voxelNuminFinalIter = voxelNum%(voxelSegNum);
}
if(iterNum ==1)
voxelSegNum = voxelNum;
//printf("GPU interation Num %d\n",iterNum);
//allocate voxel Pos Memory
float* occupancyOutputGPU=NULL; //Contains the 3D volume cost map
hipMalloc((void**) &occupancyOutputGPU, voxelSegNum*sizeof(float)); //voxelSegNum * 4Byte
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 5 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
float* voxelCoordsGPU=NULL;
hipMalloc((void**) &voxelCoordsGPU, voxelSegNum*sizeof(float)*3); //voxelSegNum * 12Byte
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 7 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
hipMemGetInfo(&free,&total);
//printf("## CUDA:: after :: free%f MB, total %f MB\n",free/1e6,total/1e6);
for(int i=0;i<iterNum;++i)
{
hipMemset((void*) occupancyOutputGPU, 0, voxelSegNum*sizeof(float));
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 6 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
if(i == iterNum-1 && voxelNuminFinalIter>0) //Final iteration
{
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 8 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
hipMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelNuminFinalIter*sizeof(float)*3,hipMemcpyHostToDevice);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 9 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
dim3 block(512,1);
dim3 grid(divUp(voxelNuminFinalIter,block.x), 1);
if(grid.x>65535)
{
//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
grid.x = 65535; //clamp the grid dimension only; the device buffers are still needed for the kernel launch below
}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
if(bOnlyPositiveValueAvg==false) //avg = valueSum / (number of views whose projection lands on the image)
hipLaunchKernelGGL(( CalcVolumeCostAverage_hd_float), dim3(grid), dim3(block), 0, 0, voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
else //avg = valueSum / (number of views that both land on the image and have a positive value)
hipLaunchKernelGGL(( CalcVolumeCostAverage_hd_float_onlyPositiveCost), dim3(grid), dim3(block), 0, 0, voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
//CalcVolumeCostWithOrientation<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,(int)camCenters.size(),camCenterParmGPU,occupancyOutputGPU);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 10 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
//dst.download(resultImage);
//ImageSC2(resultImage);
hipDeviceSynchronize();
hipMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelNuminFinalIter*sizeof(float),hipMemcpyDeviceToHost);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("####################### CUDA Error 11 : %s ###################\n", hipGetErrorString(error));
hipFree(foreGroundParmGPU);
hipFree(voxelCoordsGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
return false;
}
}
else
{
float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
hipMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelSegNum*sizeof(float)*3,hipMemcpyHostToDevice);
dim3 block(512,1);
dim3 grid(divUp(voxelSegNum,block.x), 1);
if(grid.x>65535)
{
//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
grid.x = 65535;
}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
hipLaunchKernelGGL(( CalcVolumeCostAverage_hd_float), dim3(grid), dim3(block), 0, 0, voxelCoordsGPU,voxelSegNum,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
hipGetLastError();
//dst.download(resultImage);
//ImageSC2(resultImage);
hipDeviceSynchronize();
hipMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelSegNum*sizeof(float),hipMemcpyDeviceToHost);
hipError_t error= hipGetLastError();
if(error != hipSuccess)
{
// something's gone wrong
// print out the CUDA error as a string
printf("CUDA Error: %s\n", hipGetErrorString(error));
}
}
//printf("CUDA Iteration %d/%d\n",i,iterNum);//
}
//delete[] voxelCoords;
hipFree(voxelCoordsGPU);
hipFree(foreGroundParmGPU);
hipFree(projectMatParmGPU);
hipFree(occupancyOutputGPU);
hipMemGetInfo(&free,&total);
//printf("## CUDA:: return :: free%f MB, total %f MB\n",free/1e6,total/1e6);
return true;
}
|
9d1760e02298ab344be2c283748efa415817b46e.cu
|
#include "UtilityGPU.h"
#include <cuda.h>
#include <iostream>
using namespace cv;
using namespace std;
int divUp(int x, int y)
{
return (x + y - 1)/y; //integer ceiling; rounding x/y to the nearest integer under-counts blocks when the remainder is small
}
void printMatrix2(Mat &M, std::string matrix)
{
printf("Matrix \"%s\" is %i x %i\n", matrix.c_str(), M.rows, M.cols);
for( int r=0;r<M.rows;++r)
{
for( int c=0;c<M.cols;++c)
{
//printf("%.20lf ",M.at<double>(r,c));
printf("%.6lf ",M.at<float>(r,c));
}
cout << endl;
}
cout <<endl;
}
void printMatrix2(std::string matrix,Mat &M)
{
printf("Matrix \"%s\" is %i x %i\n", matrix.c_str(), M.rows, M.cols);
for( int r=0;r<M.rows;++r)
{
for( int c=0;c<M.cols;++c)
{
//printf("%.20lf ",M.at<double>(r,c));
printf("%.6lf ",M.at<float>(r,c));
}
cout <<endl;
}
cout <<endl;
}
//Device code
//foreGroundParm is floating value
//input: foreGroundParm: 0-255
//result: occupancyOutput: 0 - 1 (average)
__global__ void CalcVolumeCostAverage_hd_float(float* voxelCoordsParm, int voxelNum, float* foreGroundParm, int foreGroundNum, float* projectMatParm,float* occupancyOutput)
{
//float maxValue = foreGroundNum;
// int sizeOfOneUnitP = 12*sizeof(float);
const int voxelIdx = 512* blockIdx.x + threadIdx.x;
if(voxelIdx<voxelNum)
{
float* tempCoord = &voxelCoordsParm[voxelIdx*3];
int totalCnt=0;
float foreValue=0;
for(int i=0;i<foreGroundNum;++i)
{
float* tempForeground = &foreGroundParm[2073600*i]; //1920*1080 = 2073600
float* P = &projectMatParm[12*i];
//float* P = projectMatParm+12;//+1 [12*i];
//projection
float z = P[8]*tempCoord[0] + P[9]*tempCoord[1] + P[10]*tempCoord[2] + P[11] ; //bug fixed: z was previously declared as int, which truncated the projective depth
float x = (P[0]*tempCoord[0] + P[1]*tempCoord[1] + P[2]*tempCoord[2] + P[3] )/z;
float y = (P[4]*tempCoord[0] + P[5]*tempCoord[1] + P[6]*tempCoord[2] + P[7] )/z;
//boundary checking
if(!(x<1 ||x>=1919 || y<1 || y>=1079))
{
//bilinear interpolation
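//Bilinearly interpolate the foreground map at the non-integer projection (x,y):
//alpha is the fractional offset in y and beta in x; value_l/value_r are blended
//along x to give finalValue.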
int x_floor = float(x);
int x_ceil = x_floor+1;
int y_floor = float(y);
int y_ceil = y_floor +1;
float value_bl = tempForeground[y_floor*1920 + x_floor];
float value_br = tempForeground[y_floor*1920+ x_ceil];
float value_tl = tempForeground[y_ceil*1920+ x_floor];
float value_tr = tempForeground[y_ceil*1920 + x_ceil];
float alpha = y - y_floor;
float value_l = (1-alpha) *value_bl + alpha * value_tl;
float value_r = (1-alpha) *value_br + alpha * value_tr;
float beta = x - x_floor;
float finalValue = (1-beta) *value_l + beta * value_r;
foreValue += finalValue;
totalCnt++;
}
}
//occupancyOutput[voxelIdx] =0.5;
if(foreValue<=1 || totalCnt<=1)
occupancyOutput[voxelIdx] = 0;
else
occupancyOutput[voxelIdx] = float(foreValue)/float(totalCnt*255);
}
}
//Device code
//foreGroundParm is floating value
//input: foreGroundParm: 0-255
//result: occupancyOutput: 0 - 1 (average)
__global__ void CalcVolumeCostAverage_hd_float_onlyPositiveCost(float* voxelCoordsParm, int voxelNum, float* foreGroundParm, int foreGroundNum, float* projectMatParm,float* occupancyOutput)
{
//float maxValue = foreGroundNum;
// int sizeOfOneUnitP = 12*sizeof(float);
const int voxelIdx = 512* blockIdx.x + threadIdx.x;
if(voxelIdx<voxelNum)
{
float* tempCoord = &voxelCoordsParm[voxelIdx*3];
int totalCnt=0;
float foreValue=0;
for(int i=0;i<foreGroundNum;++i)
{
float* tempForeground = &foreGroundParm[2073600*i]; //1920*1080 = 2073600
float* P = &projectMatParm[12*i];
//float* P = projectMatParm+12;//+1 [12*i];
//projection
float z = P[8]*tempCoord[0] + P[9]*tempCoord[1] + P[10]*tempCoord[2] + P[11] ; //bug fixed: z was previously declared as int, which truncated the projective depth
float x = (P[0]*tempCoord[0] + P[1]*tempCoord[1] + P[2]*tempCoord[2] + P[3] )/z;
float y = (P[4]*tempCoord[0] + P[5]*tempCoord[1] + P[6]*tempCoord[2] + P[7] )/z;
//boundary checking
if(!(x<1 ||x>=1919 || y<1 || y>=1079))
{
//bilinear interpolation
int x_floor = float(x);
int x_ceil = x_floor+1;
int y_floor = float(y);
int y_ceil = y_floor +1;
float value_bl = tempForeground[y_floor*1920 + x_floor];
float value_br = tempForeground[y_floor*1920+ x_ceil];
float value_tl = tempForeground[y_ceil*1920+ x_floor];
float value_tr = tempForeground[y_ceil*1920 + x_ceil];
float alpha = y - y_floor;
float value_l = (1-alpha) *value_bl + alpha * value_tl;
float value_r = (1-alpha) *value_br + alpha * value_tr;
float beta = x - x_floor;
float finalValue = (1-beta) *value_l + beta * value_r;
foreValue += finalValue;
if(finalValue>0)
totalCnt++;
}
}
//occupancyOutput[voxelIdx] =0.5;
if(foreValue<=1 || totalCnt<=1)
occupancyOutput[voxelIdx] = 0;
else
occupancyOutput[voxelIdx] = float(foreValue)/float(totalCnt*255);
}
}
//Device code
//foreGroundParm is floating value
//input: foreGroundParm: 0-255
//result: occupancyOutput: 0 - 1 (average)
__global__ void CalcVolumeCostAverage_vga_float(float* voxelCoordsParm, int voxelNum, float* foreGroundParm, int foreGroundNum, float* projectMatParm,float* occupancyOutput)
{
//float maxValue = foreGroundNum;
// int sizeOfOneUnitP = 12*sizeof(float);
const int voxelIdx = 512* blockIdx.x + threadIdx.x;
if(voxelIdx<voxelNum)
{
float* tempCoord = &voxelCoordsParm[voxelIdx*3];
int totalCnt=0;
float valueSum=0;
for(int i=0;i<foreGroundNum;++i)
{
float* tempForeground = &foreGroundParm[307200*i]; //640*480 = 307200
float* P = &projectMatParm[12*i];
//float* P = projectMatParm+12;//+1 [12*i];
//projection
			float z = P[8]*tempCoord[0] + P[9]*tempCoord[1] + P[10]*tempCoord[2] + P[11] ;		//bug fixed.....it was int. What a bug.
float x = (P[0]*tempCoord[0] + P[1]*tempCoord[1] + P[2]*tempCoord[2] + P[3] )/z;
float y = (P[4]*tempCoord[0] + P[5]*tempCoord[1] + P[6]*tempCoord[2] + P[7] )/z;
//boundary checking
if(!(x<1 ||x>=639 || y<1 || y>=479))
{
//bilinear interpolation
				int x_floor = (int)x;
				int x_ceil = x_floor+1;
				int y_floor = (int)y;
int y_ceil = y_floor +1;
float value_bl = tempForeground[y_floor*640 + x_floor];
float value_br = tempForeground[y_floor*640 + x_ceil];
float value_tl = tempForeground[y_ceil*640 + x_floor];
float value_tr = tempForeground[y_ceil*640 + x_ceil];
float alpha = y - y_floor;
float value_l = (1-alpha) *value_bl + alpha * value_tl;
float value_r = (1-alpha) *value_br + alpha * value_tr;
float beta = x - x_floor;
float curValueFinal = (1-beta) *value_l + beta * value_r;
valueSum += curValueFinal;
//if(curValueFinal>0) //ignore non-related views...
totalCnt++;
}
}
//occupancyOutput[voxelIdx] =0.5;
if(totalCnt<=1)
occupancyOutput[voxelIdx] = 0;
else
occupancyOutput[voxelIdx] = float(valueSum)/float(totalCnt*255);
}
}
int bFirstCall=true;
bool DetectionCostVolumeGeneration_float_GPU_average_vga(float* voxelCoords,int voxelNum, vector<Mat_<float> >& foregroundVect,vector<Mat_<float>>& projectMatVect,float* occupancyOutput)
{
int nDevices;
cudaGetDeviceCount(&nDevices);
if(bFirstCall)
{
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
printf("## Selected GPU idx: %d\n", g_gpu_device_id);
bFirstCall =false;
}
cudaSetDevice(g_gpu_device_id);
size_t free,total;
cudaMemGetInfo(&free,&total);
//printf("## CUDA:: MemoryChecking ::free%f MB, total %f MB\n",free/1e6,total/1e6);
assert(foregroundVect.size() == projectMatVect.size());
	float* foreGroundParmGPU =NULL;		//foreground image data (or detection cost map)
cudaMalloc((void**) &foreGroundParmGPU, 307200 * foregroundVect.size()*sizeof(float)); //640x480 = 307200
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 1 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
cudaMemcpy(&foreGroundParmGPU[307200*i],foregroundVect[i].data,307200*sizeof(float),cudaMemcpyHostToDevice);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 2 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
return false;
}
}
float* projectMatParmGPU=NULL;
int sizeOfOneUnitP = 12 *sizeof(float);
cudaMalloc((void**) &projectMatParmGPU, sizeOfOneUnitP* foregroundVect.size());
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 3 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
cudaMemcpy(&projectMatParmGPU[12*i],projectMatVect[i].data,sizeOfOneUnitP,cudaMemcpyHostToDevice);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 4 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
return false;
}
}
////////////////////////////////////////////////////////////////////////////////
//// Voxel memory allocation
////////////////////////////////////////////////////////////////////////////////
	int voxelSegNum = 2e8;		//process the voxels in chunks of about 200 million
int iterNum = voxelNum/float(voxelSegNum);
	int voxelNuminFinalIter = 0;
if(voxelNum%(voxelSegNum)>0)
{
iterNum++;
voxelNuminFinalIter = voxelNum%(voxelSegNum);
}
if(iterNum ==1)
voxelSegNum = voxelNum;
	//printf("GPU iteration Num %d\n",iterNum);
//allocate voxel Pos Memory
	float* occupancyOutputGPU=NULL;		//Contains the 3D volume cost map
cudaMalloc((void**) &occupancyOutputGPU, voxelSegNum*sizeof(float)); //voxelSegNum * 4Byte
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 5 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
float* voxelCoordsGPU=NULL;
cudaMalloc((void**) &voxelCoordsGPU, voxelSegNum*sizeof(float)*3); //voxelSegNum * 12Byte
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 7 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
cudaMemGetInfo(&free,&total);
//printf("## CUDA:: after :: free%f MB, total %f MB\n",free/1e6,total/1e6);
for(int i=0;i<iterNum;++i)
{
cudaMemset((void*) occupancyOutputGPU, 0, voxelSegNum*sizeof(float));
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 6 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
		if(i == iterNum-1 && voxelNuminFinalIter>0)		//Final iteration
{
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 8 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
cudaMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelNuminFinalIter*sizeof(float)*3,cudaMemcpyHostToDevice);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 9 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
dim3 block(512,1);
dim3 grid(divUp(voxelNuminFinalIter,block.x), 1);
			if(grid.x>65535)
			{
				//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
				grid.x = 65535;
			}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
CalcVolumeCostAverage_vga_float<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
//CalcVolumeCostAverage_vga_float_onlyPositiveCost<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
//CalcVolumeCostWithOrientation<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,(int)camCenters.size(),camCenterParmGPU,occupancyOutputGPU);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 10 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
//dst.download(resultImage);
//ImageSC2(resultImage);
cudaDeviceSynchronize();
cudaMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelNuminFinalIter*sizeof(float),cudaMemcpyDeviceToHost);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 11 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
}
else
{
			float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
cudaMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelSegNum*sizeof(float)*3,cudaMemcpyHostToDevice);
dim3 block(512,1);
dim3 grid(divUp(voxelSegNum,block.x), 1);
if(grid.x>65535)
{
//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
grid.x = 65535;
}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
CalcVolumeCostAverage_vga_float<<<grid, block>>>(voxelCoordsGPU,voxelSegNum,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
cudaGetLastError();
//dst.download(resultImage);
//ImageSC2(resultImage);
cudaDeviceSynchronize();
cudaMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelSegNum*sizeof(float),cudaMemcpyDeviceToHost);
cudaError_t error= cudaGetLastError();
if(error != cudaSuccess)
{
// something's gone wrong
// print out the CUDA error as a string
printf("CUDA Error: %s\n", cudaGetErrorString(error));
}
}
//printf("CUDA Iteration %d/%d\n",i,iterNum);//
}
//delete[] voxelCoords;
cudaFree(voxelCoordsGPU);
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
cudaMemGetInfo(&free,&total);
//printf("## CUDA:: return :: free%f MB, total %f MB\n",free/1e6,total/1e6);
return true;
}
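//Note: the grid-size computations in this file rely on a divUp helper that is not
//shown in this excerpt. Assuming the usual rounding-up division convention, it would
//be something like the following sketch:
//
//	inline int divUp(int total, int grain)
//	{
//		return (total + grain - 1) / grain;		//smallest block count covering all items
//	}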
bool DetectionCostVolumeGeneration_float_GPU_average_hd(float* voxelCoords,int voxelNum, vector<Mat_<float> >& foregroundVect,vector<Mat_<float>>& projectMatVect,float* occupancyOutput,bool bOnlyPositiveValueAvg)
{
size_t free,total;
cudaMemGetInfo(&free,&total);
//printf("## CUDA:: before ::a free%f MB, total %f MB\n",free/1e6,total/1e6);
assert(foregroundVect.size() == projectMatVect.size());
	float* foreGroundParmGPU =NULL;		//foreground image data (or detection cost map)
//cudaMalloc((void**) &foreGroundParmGPU, 307200 * foregroundVect.size()*sizeof(float)); //640x480 = 307200
cudaMalloc((void**) &foreGroundParmGPU, 2073600 * foregroundVect.size()*sizeof(float)); //1920x1080= 2073600
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 1 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
cudaMemcpy(&foreGroundParmGPU[2073600*i],foregroundVect[i].data,2073600*sizeof(float),cudaMemcpyHostToDevice);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 2 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
return false;
}
}
float* projectMatParmGPU=NULL;
int sizeOfOneUnitP = 12 *sizeof(float);
cudaMalloc((void**) &projectMatParmGPU, sizeOfOneUnitP* foregroundVect.size());
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 3 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
return false;
}
for(int i=0;i<foregroundVect.size();++i)
{
cudaMemcpy(&projectMatParmGPU[12*i],projectMatVect[i].data,sizeOfOneUnitP,cudaMemcpyHostToDevice);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 4 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
return false;
}
}
////////////////////////////////////////////////////////////////////////////////
//// Voxel memory allocation
////////////////////////////////////////////////////////////////////////////////
	int voxelSegNum = 2e8;		//process the voxels in chunks of about 200 million
int iterNum = voxelNum/float(voxelSegNum);
	int voxelNuminFinalIter = 0;
if(voxelNum%(voxelSegNum)>0)
{
iterNum++;
voxelNuminFinalIter = voxelNum%(voxelSegNum);
}
if(iterNum ==1)
voxelSegNum = voxelNum;
	//printf("GPU iteration Num %d\n",iterNum);
//allocate voxel Pos Memory
	float* occupancyOutputGPU=NULL;		//Contains the 3D volume cost map
cudaMalloc((void**) &occupancyOutputGPU, voxelSegNum*sizeof(float)); //voxelSegNum * 4Byte
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 5 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
float* voxelCoordsGPU=NULL;
cudaMalloc((void**) &voxelCoordsGPU, voxelSegNum*sizeof(float)*3); //voxelSegNum * 12Byte
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 7 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
cudaMemGetInfo(&free,&total);
//printf("## CUDA:: after :: free%f MB, total %f MB\n",free/1e6,total/1e6);
for(int i=0;i<iterNum;++i)
{
cudaMemset((void*) occupancyOutputGPU, 0, voxelSegNum*sizeof(float));
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 6 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
		if(i == iterNum-1 && voxelNuminFinalIter>0)		//Final iteration
{
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 8 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
cudaMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelNuminFinalIter*sizeof(float)*3,cudaMemcpyHostToDevice);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 9 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
dim3 block(512,1);
dim3 grid(divUp(voxelNuminFinalIter,block.x), 1);
			if(grid.x>65535)
			{
				//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
				grid.x = 65535;
			}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
if(bOnlyPositiveValueAvg==false) //avg = valuSum / NumberOfView_ProjectedOnImage
CalcVolumeCostAverage_hd_float<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
else //avg = valuSum / (NumberOfView_ProjectedOnImage & positiveValidValue)
CalcVolumeCostAverage_hd_float_onlyPositiveCost<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
//CalcVolumeCostWithOrientation<<<grid, block>>>(voxelCoordsGPU,voxelNuminFinalIter,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,(int)camCenters.size(),camCenterParmGPU,occupancyOutputGPU);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 10 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
//dst.download(resultImage);
//ImageSC2(resultImage);
cudaDeviceSynchronize();
cudaMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelNuminFinalIter*sizeof(float),cudaMemcpyDeviceToHost);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("####################### CUDA Error 11 : %s ###################\n", cudaGetErrorString(error));
cudaFree(foreGroundParmGPU);
cudaFree(voxelCoordsGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
return false;
}
}
else
{
			float* tempVoxelPtr = &voxelCoords[i*voxelSegNum*3];
cudaMemcpy(voxelCoordsGPU,tempVoxelPtr,voxelSegNum*sizeof(float)*3,cudaMemcpyHostToDevice);
dim3 block(512,1);
dim3 grid(divUp(voxelSegNum,block.x), 1);
if(grid.x>65535)
{
//printf("GPU:: grid size is too big !! (%d,%d)\n",grid.x,grid.y);
grid.x = 65535;
}
//else
//printf("GPU:: grid size : (%d,%d)\n",grid.x,grid.y);
CalcVolumeCostAverage_hd_float<<<grid, block>>>(voxelCoordsGPU,voxelSegNum,foreGroundParmGPU,foregroundVect.size(),projectMatParmGPU,occupancyOutputGPU);
cudaGetLastError();
//dst.download(resultImage);
//ImageSC2(resultImage);
cudaDeviceSynchronize();
cudaMemcpy(&occupancyOutput[i*voxelSegNum],occupancyOutputGPU,voxelSegNum*sizeof(float),cudaMemcpyDeviceToHost);
cudaError_t error= cudaGetLastError();
if(error != cudaSuccess)
{
// something's gone wrong
// print out the CUDA error as a string
printf("CUDA Error: %s\n", cudaGetErrorString(error));
}
}
//printf("CUDA Iteration %d/%d\n",i,iterNum);//
}
//delete[] voxelCoords;
cudaFree(voxelCoordsGPU);
cudaFree(foreGroundParmGPU);
cudaFree(projectMatParmGPU);
cudaFree(occupancyOutputGPU);
cudaMemGetInfo(&free,&total);
//printf("## CUDA:: return :: free%f MB, total %f MB\n",free/1e6,total/1e6);
return true;
}
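//A minimal host-side usage sketch for the HD path (hypothetical sizes and setup; the
//real caller and the OpenCV Mat preparation are not part of this excerpt):
//
//	vector<Mat_<float> > foregrounds;		//one 1080x1920 float cost map per camera
//	vector<Mat_<float> > projections;		//one 3x4 float projection matrix per camera
//	int voxelNum = 128*128*128;
//	vector<float> voxelXYZ(voxelNum*3);		//world-space x,y,z for every voxel
//	vector<float> occupancy(voxelNum);
//	bool ok = DetectionCostVolumeGeneration_float_GPU_average_hd(
//		voxelXYZ.data(), voxelNum, foregrounds, projections, occupancy.data(), false);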
|
d883ae3d9091b368b8843e90a6affe261819974a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
 * This program uses the device CURAND API to generate
 * normally-distributed pseudo-random floats on the GPU.
 * Each thread repeatedly draws samples; the last draw from
 * every thread is copied back to the host and printed.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
__global__ void setup_kernel(hiprandState_t *state)
{
int id = threadIdx.x + blockIdx.x * 64;
/* Each thread gets same seed, a different sequence
number, no offset */
hiprand_init(1234, id, 0, &state[id]);
//hiprand_init(id, 0, 0, &state[id]);
}
__global__ void generate_normal_kernel(hiprandState_t *state,
int n,
float *result)
{
int id = threadIdx.x + blockIdx.x * 64;
float x;
/* Copy state to local memory for efficiency */
hiprandState_t localState = state[id];
/* Generate pseudo-random normals */
for(int i = 0; i < n; i++) {
x = hiprand_normal(&localState);
}
/* Copy state back to global memory */
state[id] = localState;
/* Store results */
result[id] = x;
}
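/* The original CURAND sample this was derived from counts how many draws fall within
   one standard deviation of the mean, while the kernel above only keeps each thread's
   last draw. A counting variant would look roughly like this sketch (hypothetical
   kernel, not part of the original program; it assumes an unsigned int result buffer):

   __global__ void count_within_one_sigma_kernel(hiprandState_t *state, int n, unsigned int *count)
   {
       int id = threadIdx.x + blockIdx.x * 64;
       hiprandState_t localState = state[id];
       unsigned int hits = 0;
       for (int i = 0; i < n; i++) {
           float x = hiprand_normal(&localState);   // standard normal: mean 0, sigma 1
           if (x > -1.0f && x < 1.0f) hits++;       // within one standard deviation
       }
       state[id] = localState;
       count[id] = hits;                            // expect roughly 0.68 * n per thread
   }
*/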
int main(int argc, char *argv[])
{
int i;
unsigned int total;
hiprandState_t *devStates;
//unsigned int *devResults, *hostResults;
float *devResults, *hostResults;
bool useMRG = 0;
bool usePHILOX = 0;
int sampleCount = 10000;
bool doubleSupported = 0;
int device;
struct hipDeviceProp_t properties;
/* check for double precision support */
CUDA_CALL(hipGetDevice(&device));
CUDA_CALL(hipGetDeviceProperties(&properties,device));
if ( properties.major >= 2 || (properties.major == 1 && properties.minor >= 3) ) {
doubleSupported = 1;
}
/* Allocate space for results on host */
hostResults = (float *)calloc(64 * 64, sizeof(float));
/* Allocate space for results on device */
CUDA_CALL(hipMalloc((void **)&devResults, 64 * 64 *
sizeof(float)));
/* Set results to 0 */
CUDA_CALL(hipMemset(devResults, 0, 64 * 64 *
sizeof(float)));
/* Allocate space for prng states on device */
CUDA_CALL(hipMalloc((void **)&devStates, 64 * 64 *
sizeof(hiprandState_t)));
/* Setup prng states */
hipLaunchKernelGGL(( setup_kernel), dim3(64), dim3(64), 0, 0, devStates);
/* Set results to 0 */
CUDA_CALL(hipMemset(devResults, 0, 64 * 64 *
sizeof(float)));
/* Generate and use normal pseudo-random */
for(i = 0; i < 50; i++) {
hipLaunchKernelGGL(( generate_normal_kernel), dim3(64), dim3(64), 0, 0, devStates, sampleCount, devResults);
}
/* Copy device memory to host */
CUDA_CALL(hipMemcpy(hostResults, devResults, 64 * 64 *
sizeof(float), hipMemcpyDeviceToHost));
/* Show result */
total = 0;
for(i = 0; i < 64 * 64; i++) {
printf("%lf\n",hostResults[i]);
}
/* Cleanup */
CUDA_CALL(hipFree(devStates));
CUDA_CALL(hipFree(devResults));
free(hostResults);
return EXIT_SUCCESS;
}
|
d883ae3d9091b368b8843e90a6affe261819974a.cu
|
/*
 * This program uses the device CURAND API to generate
 * normally-distributed pseudo-random floats on the GPU.
 * Each thread repeatedly draws samples; the last draw from
 * every thread is copied back to the host and printed.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
__global__ void setup_kernel(curandState *state)
{
int id = threadIdx.x + blockIdx.x * 64;
/* Each thread gets same seed, a different sequence
number, no offset */
curand_init(1234, id, 0, &state[id]);
//curand_init(id, 0, 0, &state[id]);
}
__global__ void generate_normal_kernel(curandState *state,
int n,
float *result)
{
int id = threadIdx.x + blockIdx.x * 64;
float x;
/* Copy state to local memory for efficiency */
curandState localState = state[id];
/* Generate pseudo-random normals */
for(int i = 0; i < n; i++) {
x = curand_normal(&localState);
}
/* Copy state back to global memory */
state[id] = localState;
/* Store results */
result[id] = x;
}
int main(int argc, char *argv[])
{
int i;
unsigned int total;
curandState *devStates;
//unsigned int *devResults, *hostResults;
float *devResults, *hostResults;
bool useMRG = 0;
bool usePHILOX = 0;
int sampleCount = 10000;
bool doubleSupported = 0;
int device;
struct cudaDeviceProp properties;
/* check for double precision support */
CUDA_CALL(cudaGetDevice(&device));
CUDA_CALL(cudaGetDeviceProperties(&properties,device));
if ( properties.major >= 2 || (properties.major == 1 && properties.minor >= 3) ) {
doubleSupported = 1;
}
/* Allocate space for results on host */
hostResults = (float *)calloc(64 * 64, sizeof(float));
/* Allocate space for results on device */
CUDA_CALL(cudaMalloc((void **)&devResults, 64 * 64 *
sizeof(float)));
/* Set results to 0 */
CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 *
sizeof(float)));
/* Allocate space for prng states on device */
CUDA_CALL(cudaMalloc((void **)&devStates, 64 * 64 *
sizeof(curandState)));
/* Setup prng states */
setup_kernel<<<64, 64>>>(devStates);
/* Set results to 0 */
CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 *
sizeof(float)));
/* Generate and use normal pseudo-random */
for(i = 0; i < 50; i++) {
generate_normal_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
}
/* Copy device memory to host */
CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 64 *
sizeof(float), cudaMemcpyDeviceToHost));
/* Show result */
total = 0;
for(i = 0; i < 64 * 64; i++) {
printf("%lf\n",hostResults[i]);
}
/* Cleanup */
CUDA_CALL(cudaFree(devStates));
CUDA_CALL(cudaFree(devResults));
free(hostResults);
return EXIT_SUCCESS;
}
|
175d5bd78fb2a2909fba721696678ecbdc44e00c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
void printArray(const float* x, int n)
{
std::cout << "(";
for (int i = 0; i < n; i++)
{
std::cout << x[i] << ", ";
}
std::cout << ")" << std::endl;
}
// My attempt at using shared mem among blocks. Runs slightly slower than my naïve
// algorithm did, but I like this more as it is at least an attempt at optimization,
// even though it runs much slower than it should.
__global__
void f_h(const int n, const float h, const float *x, float *y, int memSize)
{
extern __shared__ float x_reg[];
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const float coef = 1 / (n * h) * .3989422804;
float sum = 0;
    float x_val = (idx < n) ? x[idx] : 0.0f;    // guard the load for threads past the end of x
for (int i = 0; i < n; i += memSize)
{
for (int j = 0; j < memSize; j += blockDim.x)
{
if(i + j + threadIdx.x < n)
{
x_reg[j + threadIdx.x] = x[j + i + threadIdx.x];
}
}
__syncthreads();
        if (idx < n)
        {
            for (int k = 0; k < memSize && k+i < n; k++)
            {
                float val = (x_val-x_reg[k]) / h;
                float k_x = expf(-(val * val) / 2);
                sum = sum + k_x;
            }
        }
        __syncthreads();    // keep all threads in step before the next tile overwrites x_reg
    }
    if (idx < n)
    {
        y[idx] = coef * sum;
    }
}
void gpuCall(int n, float h, const float *x_v, float *y_v)
{
int arrSize = n*sizeof(float);
float *x, *y;
hipMalloc(&x, arrSize);
hipMalloc(&y, arrSize);
hipMemcpy(x, x_v, arrSize, hipMemcpyHostToDevice);
hipMemcpy(y, y_v, arrSize, hipMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
int memSize = blockSize * 4;
hipLaunchKernelGGL(( f_h), dim3(numBlocks), dim3(blockSize), memSize * sizeof(float), 0, n, h, x, y, memSize);
hipDeviceSynchronize();
hipMemcpy(y_v, y, arrSize, hipMemcpyDeviceToHost);
hipFree(x);
hipFree(y);
}
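// A minimal usage sketch for gpuCall (hypothetical driver, not part of the original
// file): build a small sample, evaluate the Gaussian kernel density estimate at the
// same points, and inspect the first few values with printArray.
//
//   int main()
//   {
//       const int n = 1000;
//       const float h = 0.1f;                             // bandwidth of the Gaussian kernel
//       std::vector<float> x(n), y(n, 0.0f);
//       for (int i = 0; i < n; i++) x[i] = i / float(n);  // evenly spaced sample points
//       gpuCall(n, h, x.data(), y.data());
//       printArray(y.data(), 10);                         // first few density estimates
//       return 0;
//   }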
|
175d5bd78fb2a2909fba721696678ecbdc44e00c.cu
|
#include <cuda.h>
#include <iostream>
#include <vector>
void printArray(const float* x, int n)
{
std::cout << "(";
for (int i = 0; i < n; i++)
{
std::cout << x[i] << ", ";
}
std::cout << ")" << std::endl;
}
// My attempt at using shared mem among blocks. Runs slightly slower than my naïve
// algorithm did, but I like this more as it is at least an attempt at optimization,
// even though it runs much slower than it should.
__global__
void f_h(const int n, const float h, const float *x, float *y, int memSize)
{
extern __shared__ float x_reg[];
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const float coef = 1 / (n * h) * .3989422804;
float sum = 0;
    float x_val = (idx < n) ? x[idx] : 0.0f;    // guard the load for threads past the end of x
for (int i = 0; i < n; i += memSize)
{
for (int j = 0; j < memSize; j += blockDim.x)
{
if(i + j + threadIdx.x < n)
{
x_reg[j + threadIdx.x] = x[j + i + threadIdx.x];
}
}
__syncthreads();
        if (idx < n)
        {
            for (int k = 0; k < memSize && k+i < n; k++)
            {
                float val = (x_val-x_reg[k]) / h;
                float k_x = expf(-(val * val) / 2);
                sum = sum + k_x;
            }
        }
        __syncthreads();    // keep all threads in step before the next tile overwrites x_reg
    }
    if (idx < n)
    {
        y[idx] = coef * sum;
    }
}
void gpuCall(int n, float h, const float *x_v, float *y_v)
{
int arrSize = n*sizeof(float);
float *x, *y;
cudaMalloc(&x, arrSize);
cudaMalloc(&y, arrSize);
cudaMemcpy(x, x_v, arrSize, cudaMemcpyHostToDevice);
cudaMemcpy(y, y_v, arrSize, cudaMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
int memSize = blockSize * 4;
f_h<<<numBlocks, blockSize, memSize * sizeof(float)>>>(n, h, x, y, memSize);
cudaDeviceSynchronize();
cudaMemcpy(y_v, y, arrSize, cudaMemcpyDeviceToHost);
cudaFree(x);
cudaFree(y);
}
|
c5ce85f12aac6b36f08c27d3efaa8e97a31154a5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matvec.h"
#include "constants.h"
__global__ void vertexToEdgeKernel(double *d_edgMat, double *d_datMat,
int *d_elmVtxMat, int datNum, int elmNum)
{
int elmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( elmIdx < elmNum )
{
int q0Idx = d_elmVtxMat[ elmIdx];
int q1Idx = d_elmVtxMat[ elmNum + elmIdx];
int q2Idx = d_elmVtxMat[2 * elmNum + elmIdx];
int q3Idx = d_elmVtxMat[3 * elmNum + elmIdx];
vector q0Vec, q1Vec, q2Vec, q3Vec;
getVector(q0Vec, d_datMat, q0Idx, datNum);
getVector(q1Vec, d_datMat, q1Idx, datNum);
getVector(q2Vec, d_datMat, q2Idx, datNum);
getVector(q3Vec, d_datMat, q3Idx, datNum);
vector q10Vec, q20Vec, q30Vec;
vectorSubtract(q10Vec, q1Vec, q0Vec);
vectorSubtract(q20Vec, q2Vec, q0Vec);
vectorSubtract(q30Vec, q3Vec, q0Vec);
setEdge(d_edgMat, q10Vec, q20Vec, q30Vec, elmIdx, elmNum);
}
return;
}
void vertexToEdge(double *d_edgMat, double *d_datMat, int *d_elmVtxMat, int datNum, int elmNum)
{
int blkNum = (elmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( vertexToEdgeKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_edgMat, d_datMat, d_elmVtxMat, datNum, elmNum);
return;
}
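// This translation unit depends on "matvec.h" and "constants.h", which are not part
// of this excerpt. Judging from the calls above, they are assumed to provide roughly
// the following (plausible declarations only, not the actual headers):
//
//   #define BLKDIM 256                               // threads per block used by the launcher
//   typedef double vector[3];                        // small fixed-size 3D vector type
//   __device__ void getVector(vector out, const double* mat, int idx, int num);
//   __device__ void vectorSubtract(vector out, const vector a, const vector b);
//   __device__ void setEdge(double* edgMat, const vector e1, const vector e2,
//                           const vector e3, int elmIdx, int elmNum);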
|
c5ce85f12aac6b36f08c27d3efaa8e97a31154a5.cu
|
#include "matvec.h"
#include "constants.h"
__global__ void vertexToEdgeKernel(double *d_edgMat, double *d_datMat,
int *d_elmVtxMat, int datNum, int elmNum)
{
int elmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( elmIdx < elmNum )
{
int q0Idx = d_elmVtxMat[ elmIdx];
int q1Idx = d_elmVtxMat[ elmNum + elmIdx];
int q2Idx = d_elmVtxMat[2 * elmNum + elmIdx];
int q3Idx = d_elmVtxMat[3 * elmNum + elmIdx];
vector q0Vec, q1Vec, q2Vec, q3Vec;
getVector(q0Vec, d_datMat, q0Idx, datNum);
getVector(q1Vec, d_datMat, q1Idx, datNum);
getVector(q2Vec, d_datMat, q2Idx, datNum);
getVector(q3Vec, d_datMat, q3Idx, datNum);
vector q10Vec, q20Vec, q30Vec;
vectorSubtract(q10Vec, q1Vec, q0Vec);
vectorSubtract(q20Vec, q2Vec, q0Vec);
vectorSubtract(q30Vec, q3Vec, q0Vec);
setEdge(d_edgMat, q10Vec, q20Vec, q30Vec, elmIdx, elmNum);
}
return;
}
void vertexToEdge(double *d_edgMat, double *d_datMat, int *d_elmVtxMat, int datNum, int elmNum)
{
int blkNum = (elmNum - 1) / BLKDIM + 1;
vertexToEdgeKernel <<<blkNum, BLKDIM>>> (d_edgMat, d_datMat, d_elmVtxMat, datNum, elmNum);
return;
}
|
38c9482cecb63e4dd979970c771ca985237a3a3c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef GRAVITY
#ifdef POTENTIAL_CUFFT
#include "potential_CUFFT_3D.h"
Potential_CUFFT_3D::Potential_CUFFT_3D( void ){}
void Potential_CUFFT_3D::Initialize( Grav3D Grav){
Lbox_x = Grav.Lbox_x;
Lbox_y = Grav.Lbox_y;
Lbox_z = Grav.Lbox_z;
nx_total = Grav.nx_total;
ny_total = Grav.ny_total;
nz_total = Grav.nz_total;
nx_local = Grav.nx_local;
ny_local = Grav.ny_local;
nz_local = Grav.nz_local;
dx = Grav.dx;
dy = Grav.dy;
dz = Grav.dz;
n_cells_local = nx_local*ny_local*nz_local;
n_cells_total = nx_total*ny_total*nz_total;
chprintf( " Using Poisson Solver: CUFFT\n");
chprintf( " CUFFT: L[ %f %f %f ] N[ %d %d %d ] dx[ %f %f %f ]\n", Lbox_x, Lbox_y, Lbox_z, nx_local, ny_local, nz_local, dx, dy, dz );
AllocateMemory_CPU();
chprintf( " CUFFT: Creating FFT plan...\n");
hipfftPlan3d( &plan_cufft_fwd, nz_local, ny_local, nx_local, HIPFFT_Z2Z);
hipfftPlan3d( &plan_cufft_bwd, nz_local, ny_local, nx_local, HIPFFT_Z2Z);
  chprintf( " CUFFT: Computing K for Gravity Green Function\n");
hipMalloc( (void**)&F.G_d, n_cells_local*sizeof(Real));
Get_K_for_Green_function();
threads_per_block = 1024;
blocks_per_grid = (( n_cells_local - 1 ) / threads_per_block) + 1;
  chprintf( " CUFFT: Using %d threads and %d blocks for applying G function: %d \n", threads_per_block, blocks_per_grid, threads_per_block*blocks_per_grid);
}
void Potential_CUFFT_3D::AllocateMemory_CPU( void ){
F.output_h = (Complex_cufft *) malloc(n_cells_local*sizeof(Complex_cufft));
F.G_h = (Real *) malloc(n_cells_local*sizeof(Real));
}
void Potential_CUFFT_3D::AllocateMemory_GPU( void ){
hipMalloc( (void**)&F.input_real_d, n_cells_local*sizeof(Real_cufft));
hipMalloc( (void**)&F.input_d, n_cells_local*sizeof(Complex_cufft));
hipMalloc( (void**)&F.transform_d, n_cells_local*sizeof(Complex_cufft));
hipMalloc( (void**)&F.output_d, n_cells_local*sizeof(Complex_cufft));
}
void Potential_CUFFT_3D::FreeMemory_GPU( void ){
hipFree( F.input_real_d );
hipFree( F.input_d );
hipFree( F.output_d );
hipFree( F.transform_d );
}
void Potential_CUFFT_3D::Reset( void ){
free( F.output_h );
free( F.G_h );
hipFree( F.G_d );
}
void Potential_CUFFT_3D::Get_K_for_Green_function( void){
Real kx, ky, kz, Gx, Gy, Gz, G;
int id;
for (int k=0; k<nz_local; k++){
kz = 2*M_PI*k/nz_local;
Gz = sin( kz/2 );
for (int j=0; j<ny_local; j++){
ky = 2*M_PI*j/ny_local;
Gy = sin( ky/2 );
for ( int i=0; i<nx_local; i++){
id = i + j*nx_local + k*nx_local*ny_local;
kx = 2*M_PI*i/nx_local;
Gx = sin( kx/2 );
G = -1 / ( Gx*Gx + Gy*Gy + Gz*Gz ) * dx * dx / 4 ;
if ( id == 0 ) G = 1;
F.G_h[id] = G;
// F.G_h[id] = 0.1;
}
}
}
hipMemcpy( F.G_d, F.G_h, n_cells_local*sizeof(Real), hipMemcpyHostToDevice );
}
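// Note on the factor stored above: for the standard 7-point finite-difference Laplacian
// on a periodic grid with spacing dx, the Fourier-space operator is
// (4/dx^2) * ( sin^2(kx/2) + sin^2(ky/2) + sin^2(kz/2) ), so multiplying the density
// transform by G = -dx^2 / (4 * (sin^2(kx/2)+sin^2(ky/2)+sin^2(kz/2))) inverts the
// discrete Poisson equation; the k=0 mode is treated separately because the operator
// vanishes there. This reading assumes an isotropic grid (dx = dy = dz) and that any
// physical constants are applied elsewhere in the solver.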
__global__
void Copy_Input_Kernel( int n_cells, Real *input_h, Complex_cufft *input_d ){
int t_id = threadIdx.x + blockIdx.x*blockDim.x;
if ( t_id < n_cells ){
input_d[t_id].x = input_h[t_id];
input_d[t_id].y = 0.0;
}
}
void Potential_CUFFT_3D::Copy_Output( Grav3D &Grav ){
hipMemcpy( F.output_h, F.output_d, n_cells_local*sizeof(Complex_cufft), hipMemcpyDeviceToHost );
int id, id_pot;
int i, k, j;
for (k=0; k<nz_local; k++) {
for (j=0; j<ny_local; j++) {
for (i=0; i<nx_local; i++) {
id = i + j*nx_local + k*nx_local*ny_local;
id_pot = (i+N_GHOST_POTENTIAL) + (j+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL) + (k+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL)*(ny_local+2*N_GHOST_POTENTIAL);
Grav.F.potential_h[id_pot] = F.output_h[id].x / n_cells_local;
// Grav.F.density_h[id] = F.G_h[id];
}
}
}
}
void Potential_CUFFT_3D::Copy_Input( Grav3D &Grav ){
hipMemcpy( F.input_real_d, Grav.F.density_h, n_cells_local*sizeof(Real_cufft), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( Copy_Input_Kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, n_cells_local, F.input_real_d, F.input_d );
}
__global__
void Apply_G_Funtion( int n_cells, Complex_cufft *transform, Real *G ){
int t_id = threadIdx.x + blockIdx.x*blockDim.x;
Real G_val;
if ( t_id < n_cells ){
G_val = G[t_id];
if ( t_id == 0 ) G_val = 1.0;
transform[t_id].x *= G_val;
transform[t_id].y *= G_val;
if ( t_id == 0 ){
transform[t_id].x = 0;
transform[t_id].y = 0;
}
}
}
Real Potential_CUFFT_3D::Get_Potential( Grav3D &Grav ){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
AllocateMemory_GPU();
Copy_Input( Grav );
hipfftExecZ2Z( plan_cufft_fwd, F.input_d, F.transform_d, HIPFFT_FORWARD );
hipLaunchKernelGGL(( Apply_G_Funtion), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, n_cells_local, F.transform_d, F.G_d );
hipfftExecZ2Z( plan_cufft_bwd, F.transform_d, F.output_d, HIPFFT_BACKWARD );
Copy_Output( Grav );
FreeMemory_GPU();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// chprintf( " CUFFT: Potential Time = %f msecs\n", milliseconds);
return (Real) milliseconds;
}
#endif //POTENTIAL_CUFFT
#endif //GRAVITY
|
38c9482cecb63e4dd979970c771ca985237a3a3c.cu
|
#ifdef GRAVITY
#ifdef POTENTIAL_CUFFT
#include "potential_CUFFT_3D.h"
Potential_CUFFT_3D::Potential_CUFFT_3D( void ){}
void Potential_CUFFT_3D::Initialize( Grav3D Grav){
Lbox_x = Grav.Lbox_x;
Lbox_y = Grav.Lbox_y;
Lbox_z = Grav.Lbox_z;
nx_total = Grav.nx_total;
ny_total = Grav.ny_total;
nz_total = Grav.nz_total;
nx_local = Grav.nx_local;
ny_local = Grav.ny_local;
nz_local = Grav.nz_local;
dx = Grav.dx;
dy = Grav.dy;
dz = Grav.dz;
n_cells_local = nx_local*ny_local*nz_local;
n_cells_total = nx_total*ny_total*nz_total;
chprintf( " Using Poisson Solver: CUFFT\n");
chprintf( " CUFFT: L[ %f %f %f ] N[ %d %d %d ] dx[ %f %f %f ]\n", Lbox_x, Lbox_y, Lbox_z, nx_local, ny_local, nz_local, dx, dy, dz );
AllocateMemory_CPU();
chprintf( " CUFFT: Creating FFT plan...\n");
cufftPlan3d( &plan_cufft_fwd, nz_local, ny_local, nx_local, CUFFT_Z2Z);
cufftPlan3d( &plan_cufft_bwd, nz_local, ny_local, nx_local, CUFFT_Z2Z);
  chprintf( " CUFFT: Computing K for Gravity Green Function\n");
cudaMalloc( (void**)&F.G_d, n_cells_local*sizeof(Real));
Get_K_for_Green_function();
threads_per_block = 1024;
blocks_per_grid = (( n_cells_local - 1 ) / threads_per_block) + 1;
  chprintf( " CUFFT: Using %d threads and %d blocks for applying G function: %d \n", threads_per_block, blocks_per_grid, threads_per_block*blocks_per_grid);
}
void Potential_CUFFT_3D::AllocateMemory_CPU( void ){
F.output_h = (Complex_cufft *) malloc(n_cells_local*sizeof(Complex_cufft));
F.G_h = (Real *) malloc(n_cells_local*sizeof(Real));
}
void Potential_CUFFT_3D::AllocateMemory_GPU( void ){
cudaMalloc( (void**)&F.input_real_d, n_cells_local*sizeof(Real_cufft));
cudaMalloc( (void**)&F.input_d, n_cells_local*sizeof(Complex_cufft));
cudaMalloc( (void**)&F.transform_d, n_cells_local*sizeof(Complex_cufft));
cudaMalloc( (void**)&F.output_d, n_cells_local*sizeof(Complex_cufft));
}
void Potential_CUFFT_3D::FreeMemory_GPU( void ){
cudaFree( F.input_real_d );
cudaFree( F.input_d );
cudaFree( F.output_d );
cudaFree( F.transform_d );
}
void Potential_CUFFT_3D::Reset( void ){
free( F.output_h );
free( F.G_h );
cudaFree( F.G_d );
}
void Potential_CUFFT_3D::Get_K_for_Green_function( void){
Real kx, ky, kz, Gx, Gy, Gz, G;
int id;
for (int k=0; k<nz_local; k++){
kz = 2*M_PI*k/nz_local;
Gz = sin( kz/2 );
for (int j=0; j<ny_local; j++){
ky = 2*M_PI*j/ny_local;
Gy = sin( ky/2 );
for ( int i=0; i<nx_local; i++){
id = i + j*nx_local + k*nx_local*ny_local;
kx = 2*M_PI*i/nx_local;
Gx = sin( kx/2 );
G = -1 / ( Gx*Gx + Gy*Gy + Gz*Gz ) * dx * dx / 4 ;
if ( id == 0 ) G = 1;
F.G_h[id] = G;
// F.G_h[id] = 0.1;
}
}
}
cudaMemcpy( F.G_d, F.G_h, n_cells_local*sizeof(Real), cudaMemcpyHostToDevice );
}
__global__
void Copy_Input_Kernel( int n_cells, Real *input_h, Complex_cufft *input_d ){
int t_id = threadIdx.x + blockIdx.x*blockDim.x;
if ( t_id < n_cells ){
input_d[t_id].x = input_h[t_id];
input_d[t_id].y = 0.0;
}
}
void Potential_CUFFT_3D::Copy_Output( Grav3D &Grav ){
cudaMemcpy( F.output_h, F.output_d, n_cells_local*sizeof(Complex_cufft), cudaMemcpyDeviceToHost );
int id, id_pot;
int i, k, j;
for (k=0; k<nz_local; k++) {
for (j=0; j<ny_local; j++) {
for (i=0; i<nx_local; i++) {
id = i + j*nx_local + k*nx_local*ny_local;
id_pot = (i+N_GHOST_POTENTIAL) + (j+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL) + (k+N_GHOST_POTENTIAL)*(nx_local+2*N_GHOST_POTENTIAL)*(ny_local+2*N_GHOST_POTENTIAL);
Grav.F.potential_h[id_pot] = F.output_h[id].x / n_cells_local;
// Grav.F.density_h[id] = F.G_h[id];
}
}
}
}
void Potential_CUFFT_3D::Copy_Input( Grav3D &Grav ){
cudaMemcpy( F.input_real_d, Grav.F.density_h, n_cells_local*sizeof(Real_cufft), cudaMemcpyHostToDevice );
Copy_Input_Kernel<<<blocks_per_grid, threads_per_block>>>( n_cells_local, F.input_real_d, F.input_d );
}
__global__
void Apply_G_Funtion( int n_cells, Complex_cufft *transform, Real *G ){
int t_id = threadIdx.x + blockIdx.x*blockDim.x;
Real G_val;
if ( t_id < n_cells ){
G_val = G[t_id];
if ( t_id == 0 ) G_val = 1.0;
transform[t_id].x *= G_val;
transform[t_id].y *= G_val;
if ( t_id == 0 ){
transform[t_id].x = 0;
transform[t_id].y = 0;
}
}
}
Real Potential_CUFFT_3D::Get_Potential( Grav3D &Grav ){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
AllocateMemory_GPU();
Copy_Input( Grav );
cufftExecZ2Z( plan_cufft_fwd, F.input_d, F.transform_d, CUFFT_FORWARD );
Apply_G_Funtion<<<blocks_per_grid, threads_per_block>>>( n_cells_local, F.transform_d, F.G_d );
cufftExecZ2Z( plan_cufft_bwd, F.transform_d, F.output_d, CUFFT_INVERSE );
Copy_Output( Grav );
FreeMemory_GPU();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// chprintf( " CUFFT: Potential Time = %f msecs\n", milliseconds);
return (Real) milliseconds;
}
#endif //POTENTIAL_CUFFT
#endif //GRAVITY
|
f25c69d8a6e109f0e8069a8e80e5b20407ee2f1e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/resource.h>
typedef struct{
char** name;
char* chrom_c;
//int* chrom;
long* pos;
//long* c_pos;
//char** rest;
}SNP;
typedef struct{
char* snp_name;
int* a_id; //length is the number of animals
char* ab1;
char* ab2;
int* ab;
}Sample;
int NSNPS;
int NSAMPLES;
void read_files(char* map_path, char* snp_path, char** data_string, char** snps_data){
FILE *fd;
int err;
int num_lines = -1;
char** header_array;
int i;
/***********************Allocate string for header info**********/
header_array = (char**) malloc( 10 * sizeof(char*));
for(i = 0; i < 10; i++){
header_array[i] = (char*)malloc(100);
}
/*****************************************************************/
fd = fopen(snp_path, "r");
/*******Getting number of SNP and Sample from header****/
do {
err = fscanf(fd, "%[^\n]\n", header_array[++num_lines]);
} while(err != EOF && num_lines < 10);
err = sscanf(header_array[5], "Total SNP %d", &NSNPS);
err = sscanf(header_array[7], "Total Sample %d", &NSAMPLES);
/***********************************************************/
/*************Getting Final Report Data***********************************/
//char** data_string;
data_string = (char**) malloc(NSNPS * NSAMPLES * sizeof(char*));
for(i = 0; i < NSNPS*NSAMPLES; i++){
data_string[i] = (char*)malloc(100);
}
num_lines = -1;
do {
		err = fscanf(fd, "%[^\n]\n", data_string[++num_lines]);
} while(err != EOF && num_lines < NSNPS*NSAMPLES);
fclose(fd);
/**************************************************************************/
/************************Getting MapFile Data******************************/
//char** snps_data;
char* junk = (char*) malloc(50 * sizeof(char));
snps_data = (char**) malloc(NSNPS * sizeof(char*));
for(i = 0; i < NSNPS; i++){
snps_data[i] = (char*)malloc(100);
}
fd = fopen(map_path, "r");
int num_lines2 = -1;
err = fscanf(fd, "%[^\n]\n", junk);
do {
		err = fscanf(fd, "%[^\n]\n", snps_data[++num_lines2]);
} while(err != EOF && num_lines2 < NSNPS);
free(junk);
fclose(fd);
/**************************************************************************/
}
/*************functions for the radix sort**********************************/
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit);
__device__ long scan(long* x);
__device__ void radixsort(SNP* snps, Sample* samples){
for(int i = 0; i < 64; i++){
sort_by_bit(snps, samples, i);
__syncthreads();
}
}
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit){
int i = threadIdx.x;
int size = blockDim.x;
int index;
	/***temporary variables for the snps*****/
long t_pos = snps->pos[i];
char* t_name = snps->name[i];
char t_chrom_c = snps->chrom_c[i];
//char* t_rest = snps->rest[i];
Sample t_sample = samples[i];
int p_i = (t_pos >> bit) & 1;
snps->pos[i] = p_i;
__syncthreads();
int ones_before = scan(snps->pos);
int ones_total = snps->pos[size -1];
int zeros_total = size - ones_total;
__syncthreads();
if(p_i)
index = ones_before - 1 + zeros_total;
else
index = i - ones_before;
snps->pos[index] = t_pos;
snps->name[index] = t_name;
snps->chrom_c[index] = t_chrom_c;
//snps->rest[index] = t_rest;
samples[index] = t_sample;
}
/**************************************************************************/
__device__ long scan(long* x){
int i = threadIdx.x;
int n = blockDim.x;
int offset;
for ( offset = 1; offset < n; offset *= 2){
long temp;
if (i >= offset)
temp = x[i-offset];
__syncthreads();
if(i >= offset)
x[i] = temp + x[i];
__syncthreads();
}
return x[i];
}
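/* The function above is a Hillis-Steele style inclusive prefix sum over one block:
   after the loop, x[i] holds the sum of x[0..i], which sort_by_bit uses to count how
   many set bits occur at or before each position. A small host-side reference for
   checking it (hypothetical helper, not part of the original file):

   void scan_reference(long* x, int n)
   {
       for (int i = 1; i < n; i++)
           x[i] += x[i-1];    // inclusive prefix sum, same result the kernel computes
   }
*/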
void parse(SNP* snps, Sample* animals, char** data_string, char** snp_data){
int i, j, err;
snps->name = (char**) malloc(NSNPS * sizeof(char*));
snps->chrom_c = (char*) malloc(NSNPS * sizeof(char));
snps->pos = (long*) malloc(NSNPS * sizeof(long));
for(i = 0; i < NSNPS; i++)
snps->name[i] = (char*) malloc(50 * sizeof(char));
animals = (Sample*) malloc(NSNPS * sizeof(Sample));
for(i = 0; i < NSNPS; i++){
		animals[i].snp_name = (char*) malloc(50 * sizeof(char));
		animals[i].a_id = (int*) malloc(NSAMPLES * sizeof(int));
		animals[i].ab1 = (char*) malloc(NSAMPLES * sizeof(char));
		animals[i].ab2 = (char*) malloc(NSAMPLES * sizeof(char));
		animals[i].ab = (int*) malloc(NSAMPLES * sizeof(int));
}
for (i = 0; i < NSNPS; i++){
		err = sscanf(snp_data[i], "%*d %s %c %ld %*s",
			snps->name[i], &snps->chrom_c[i], &snps->pos[i]);
}
for(i = 0; i < NSNPS; i++){
for(j = 0; j < NSAMPLES; j++)
			err = sscanf(data_string[i*NSAMPLES + j], "%s\t%d\t%*c\t%*c\t%*c\t%*c\t%c\t%c\t%*s",
				animals[i].snp_name, &animals[i].a_id[j], &animals[i].ab1[j], &animals[i].ab2[j]);
}
}
__global__ void sort(SNP* snps, Sample* samples, int nsamples){
int id = threadIdx.x;
radixsort(snps, samples);
for(int i = 0; i < nsamples; i++){
		if (samples[id].ab1[i] == 'A' && samples[id].ab2[i] == 'A'){
			samples[id].ab[i] = 1;
		}else if(samples[id].ab1[i] == 'B' && samples[id].ab2[i] == 'B'){
			samples[id].ab[i] = 2;
		}else{
			samples[id].ab[i] = 3;
		}
}
}
int main(int argc, char** argv){
SNP h_snps;
Sample* h_samples;
	char *map_path, *snp_path;
	char **data_string, **snps_data;
char** d_name;
char* d_chrom_c;
long* d_pos;
map_path = argv[1];
snp_path = argv[2];
read_files(map_path, snp_path, data_string, snps_data);
parse(&h_snps, h_samples, data_string, snps_data);
free(data_string);
free(snps_data);
hipMalloc((void**)&(d_pos), sizeof(long)*NSNPS);
hipMalloc((void**)&(d_chrom_c), sizeof(char)*NSNPS);
	hipMalloc((void**)&d_name, sizeof(char*)*NSNPS);
	hipMemcpy(d_pos, (h_snps.pos), sizeof(long)*NSNPS, hipMemcpyHostToDevice);
	hipMemcpy(d_chrom_c, (h_snps.chrom_c), sizeof(char)*NSNPS, hipMemcpyHostToDevice);
}
|
f25c69d8a6e109f0e8069a8e80e5b20407ee2f1e.cu
|
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/resource.h>
typedef struct{
char** name;
char* chrom_c;
//int* chrom;
long* pos;
//long* c_pos;
//char** rest;
}SNP;
typedef struct{
char* snp_name;
int* a_id; //length is the number of animals
char* ab1;
char* ab2;
int* ab;
}Sample;
int NSNPS;
int NSAMPLES;
void read_files(char* map_path, char* snp_path, char** data_string, char** snps_data){
FILE *fd;
int err;
int num_lines = -1;
char** header_array;
int i;
/***********************Allocate string for header info**********/
header_array = (char**) malloc( 10 * sizeof(char*));
for(i = 0; i < 10; i++){
header_array[i] = (char*)malloc(100);
}
/*****************************************************************/
fd = fopen(snp_path, "r");
/*******Getting number of SNP and Sample from header****/
do {
err = fscanf(fd, "%[^\n]\n", header_array[++num_lines]);
} while(err != EOF && num_lines < 10);
err = sscanf(header_array[5], "Total SNP %d", &NSNPS);
err = sscanf(header_array[7], "Total Sample %d", &NSAMPLES);
/***********************************************************/
/*************Getting Final Report Data***********************************/
//char** data_string;
data_string = (char**) malloc(NSNPS * NSAMPLES * sizeof(char*));
for(i = 0; i < NSNPS*NSAMPLES; i++){
data_string[i] = (char*)malloc(100);
}
num_lines = -1;
do {
		err = fscanf(fd, "%[^\n]\n", data_string[++num_lines]);
} while(err != EOF && num_lines < NSNPS*NSAMPLES);
fclose(fd);
/**************************************************************************/
/************************Getting MapFile Data******************************/
//char** snps_data;
char* junk = (char*) malloc(50 * sizeof(char));
snps_data = (char**) malloc(NSNPS * sizeof(char*));
for(i = 0; i < NSNPS; i++){
snps_data[i] = (char*)malloc(100);
}
fd = fopen(map_path, "r");
int num_lines2 = -1;
err = fscanf(fd, "%[^\n]\n", junk);
do {
		err = fscanf(fd, "%[^\n]\n", snps_data[++num_lines2]);
} while(err != EOF && num_lines2 < NSNPS);
free(junk);
fclose(fd);
/**************************************************************************/
}
/*************functions for the radix sort**********************************/
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit);
__device__ long scan(long* x);
__device__ void radixsort(SNP* snps, Sample* samples){
for(int i = 0; i < 64; i++){
sort_by_bit(snps, samples, i);
__syncthreads();
}
}
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit){
int i = threadIdx.x;
int size = blockDim.x;
int index;
	/***temporary variables for the snps*****/
long t_pos = snps->pos[i];
char* t_name = snps->name[i];
char t_chrom_c = snps->chrom_c[i];
//char* t_rest = snps->rest[i];
Sample t_sample = samples[i];
int p_i = (t_pos >> bit) & 1;
snps->pos[i] = p_i;
__syncthreads();
int ones_before = scan(snps->pos);
int ones_total = snps->pos[size -1];
int zeros_total = size - ones_total;
__syncthreads();
if(p_i)
index = ones_before - 1 + zeros_total;
else
index = i - ones_before;
snps->pos[index] = t_pos;
snps->name[index] = t_name;
snps->chrom_c[index] = t_chrom_c;
//snps->rest[index] = t_rest;
samples[index] = t_sample;
}
/**************************************************************************/
__device__ long scan(long* x){
int i = threadIdx.x;
int n = blockDim.x;
int offset;
for ( offset = 1; offset < n; offset *= 2){
long temp;
if (i >= offset)
temp = x[i-offset];
__syncthreads();
if(i >= offset)
x[i] = temp + x[i];
__syncthreads();
}
return x[i];
}
void parse(SNP* snps, Sample* animals, char** data_string, char** snp_data){
int i, j, err;
snps->name = (char**) malloc(NSNPS * sizeof(char*));
snps->chrom_c = (char*) malloc(NSNPS * sizeof(char));
snps->pos = (long*) malloc(NSNPS * sizeof(long));
for(i = 0; i < NSNPS; i++)
snps->name[i] = (char*) malloc(50 * sizeof(char));
animals = (Sample*) malloc(NSNPS * sizeof(Sample));
for(i = 0; i < NSNPS; i++){
		animals[i].snp_name = (char*) malloc(50 * sizeof(char));
		animals[i].a_id = (int*) malloc(NSAMPLES * sizeof(int));
		animals[i].ab1 = (char*) malloc(NSAMPLES * sizeof(char));
		animals[i].ab2 = (char*) malloc(NSAMPLES * sizeof(char));
		animals[i].ab = (int*) malloc(NSAMPLES * sizeof(int));
}
for (i = 0; i < NSNPS; i++){
		err = sscanf(snp_data[i], "%*d %s %c %ld %*s",
			snps->name[i], &snps->chrom_c[i], &snps->pos[i]);
}
for(i = 0; i < NSNPS; i++){
for(j = 0; j < NSAMPLES; j++)
			err = sscanf(data_string[i*NSAMPLES + j], "%s\t%d\t%*c\t%*c\t%*c\t%*c\t%c\t%c\t%*s",
				animals[i].snp_name, &animals[i].a_id[j], &animals[i].ab1[j], &animals[i].ab2[j]);
}
}
__global__ void sort(SNP* snps, Sample* samples, int nsamples){
int id = threadIdx.x;
radixsort(snps, samples);
for(int i = 0; i < nsamples; i++){
		if (samples[id].ab1[i] == 'A' && samples[id].ab2[i] == 'A'){
			samples[id].ab[i] = 1;
		}else if(samples[id].ab1[i] == 'B' && samples[id].ab2[i] == 'B'){
			samples[id].ab[i] = 2;
		}else{
			samples[id].ab[i] = 3;
		}
}
}
int main(int argc, char** argv){
SNP h_snps;
Sample* h_samples;
	char *map_path, *snp_path;
	char **data_string, **snps_data;
char** d_name;
char* d_chrom_c;
long* d_pos;
map_path = argv[1];
snp_path = argv[2];
read_files(map_path, snp_path, data_string, snps_data);
parse(&h_snps, h_samples, data_string, snps_data);
free(data_string);
free(snps_data);
cudaMalloc((void**)&(d_pos), sizeof(long)*NSNPS);
cudaMalloc((void**)&(d_chrom_c), sizeof(char)*NSNPS);
	cudaMalloc((void**)&d_name, sizeof(char*)*NSNPS);
	cudaMemcpy(d_pos, (h_snps.pos), sizeof(long)*NSNPS, cudaMemcpyHostToDevice);
	cudaMemcpy(d_chrom_c, (h_snps.chrom_c), sizeof(char)*NSNPS, cudaMemcpyHostToDevice);
}
|
57739ed5d7ede90292f36707731a7910e19628ca.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <unistd.h>
#include <condition_variable> // NOLINT
#include <fstream>
#include <iomanip>
#include <string>
#include <thread> // NOLINT
#include <unordered_set>
#include <vector>
#include "google/protobuf/text_format.h"
#include <chrono>
#include "gtest/gtest.h"
#include "paddle/fluid/distributed/ps.pb.h"
#include "paddle/fluid/distributed/ps/service/env.h"
#include "paddle/fluid/distributed/ps/service/sendrecv.pb.h"
#include "paddle/fluid/distributed/ps/table/common_graph_table.h"
#include "paddle/fluid/distributed/ps/table/graph/graph_node.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_sampler.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using namespace paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;
std::string input_file;
int exe_count = 100;
int use_nv = 1;
int fixed_key_size = 50000, sample_size = 32,
bfs_sample_nodes_in_each_shard = 10000, init_search_size = 1,
bfs_sample_edges = 20, gpu_num1 = 8, gpu_num = 8;
std::string gpu_str = "0,1,2,3,4,5,6,7";
int64_t *key[8];
std::vector<std::string> edges = {
std::string("37\t45\t0.34"), std::string("37\t145\t0.31"),
std::string("37\t112\t0.21"), std::string("96\t48\t1.4"),
std::string("96\t247\t0.31"), std::string("96\t111\t1.21"),
std::string("59\t45\t0.34"), std::string("59\t145\t0.31"),
std::string("59\t122\t0.21"), std::string("97\t48\t0.34"),
std::string("97\t247\t0.31"), std::string("97\t111\t0.21")};
// odd id:96 48 122 112
char edge_file_name[] = "test_edges.txt";
void prepare_file(char file_name[], std::vector<std::string> data) {
std::ofstream ofile;
ofile.open(file_name);
for (auto x : data) {
ofile << x << std::endl;
}
ofile.close();
}
void testSampleRate() {
#ifdef PADDLE_WITH_HETERPS
std::vector<int64_t> ids;
int start = 0;
pthread_rwlock_t rwlock;
pthread_rwlock_init(&rwlock, NULL);
{
::paddle::distributed::GraphParameter table_proto;
// table_proto.set_gpups_mode(false);
table_proto.set_shard_num(127);
table_proto.set_task_pool_size(24);
std::cerr << "initializing begin";
distributed::GraphTable graph_table;
graph_table.Initialize(table_proto);
std::cerr << "initializing done";
graph_table.Load(input_file, std::string("e>"));
int sample_actual_size = -1;
int step = fixed_key_size, cur = 0;
while (sample_actual_size != 0) {
std::unique_ptr<char[]> buffer;
graph_table.pull_graph_list(cur, step, buffer, sample_actual_size, false,
1);
int index = 0;
while (index < sample_actual_size) {
paddle::distributed::FeatureNode node;
node.recover_from_buffer(buffer.get() + index);
index += node.get_size(false);
// res.push_back(node);
ids.push_back(node.get_id());
int swap_pos = rand() % ids.size();
std::swap(ids[swap_pos], ids[(int)ids.size() - 1]);
}
cur = ids.size();
// if (sample_actual_size == 0) break;
// char *buff = buffer.get();
// for (int i = 0; i < sample_actual_size/sizeof(int64_t); i++) {
// ids.push_back(*((int64_t *)buff + i));
// int swap_pos = rand() % ids.size();
// std::swap(ids[swap_pos], ids[(int)ids.size() - 1]);
// }
// cur += sample_actual_size/sizeof(int64_t);
}
std::cerr << "load ids done" << std::endl;
std::vector<int64_t> sample_id[10], sample_neighbors[10];
std::vector<int> actual_size[10];
auto func = [&rwlock, &graph_table, &ids, &sample_id, &actual_size,
&sample_neighbors, &start](int i) {
while (true) {
int s, sn;
bool exit = false;
pthread_rwlock_wrlock(&rwlock);
if (start < ids.size()) {
s = start;
sn = ids.size() - start;
sn = min(sn, fixed_key_size);
start += sn;
} else {
exit = true;
}
pthread_rwlock_unlock(&rwlock);
if (exit) break;
std::vector<std::shared_ptr<char>> buffers(sn);
std::vector<int> ac(sn);
auto status = graph_table.random_sample_neighbors(
ids.data() + s, sample_size, buffers, ac, false);
for (int j = s; j < s + sn; j++) {
sample_id[i].push_back(ids[j]);
actual_size[i].push_back(ac[j - s] / sizeof(int64_t));
int ss = ac[j - s] / sizeof(int64_t);
for (int k = 0; k < ss; k++) {
sample_neighbors[i].push_back(
*((int64_t *)(buffers[j - s].get() + k * sizeof(int64_t))));
}
}
}
VLOG(0) << "func " << i << " returns ";
};
auto start1 = std::chrono::steady_clock::now();
std::thread thr[10];
for (int i = 0; i < 10; i++) {
thr[i] = std::thread(func, i);
}
for (int i = 0; i < 10; i++) thr[i].join();
auto end1 = std::chrono::steady_clock::now();
auto tt =
std::chrono::duration_cast<std::chrono::microseconds>(end1 - start1);
std::cerr << "total time cost without cache is " << tt.count() << " us"
<< std::endl;
int64_t tot = 0;
for (int i = 0; i < 10; i++) {
for (auto x : sample_id[i]) tot += x;
}
VLOG(0) << "sum = " << tot;
}
gpu_num = 0;
int st = 0, u = 0;
std::vector<int> device_id_mapping;
while (u < gpu_str.size()) {
VLOG(0) << u << " " << gpu_str[u];
if (gpu_str[u] == ',') {
auto p = gpu_str.substr(st, u - st);
int id = std::stoi(p);
VLOG(0) << "got a new device id" << id;
device_id_mapping.push_back(id);
st = u + 1;
}
u++;
}
auto p = gpu_str.substr(st, gpu_str.size() - st);
int id = std::stoi(p);
VLOG(0) << "got a new device id" << id;
device_id_mapping.push_back(id);
gpu_num = device_id_mapping.size();
::paddle::distributed::GraphParameter table_proto;
table_proto.set_shard_num(24);
// table_proto.set_gpups_graph_sample_class("CompleteGraphSampler");
std::shared_ptr<HeterPsResource> resource =
std::make_shared<HeterPsResource>(device_id_mapping);
resource->enable_p2p();
GpuPsGraphTable g(resource, use_nv);
g.init_cpu_table(table_proto);
std::vector<std::string> arg;
AllInGpuGraphSampler sampler;
sampler.init(&g, arg);
// g.load(std::string(input_file), std::string("e>"));
// sampler.start(std::string(input_file));
// sampler.load_from_ssd(std::string(input_file));
sampler.start_service(input_file);
/*
NodeQueryResult *query_node_res;
query_node_res = g.query_node_list(0, 0, ids.size() + 10000);
VLOG(0) << "gpu got " << query_node_res->actual_sample_size << " nodes ";
VLOG(0) << "cpu got " << ids.size() << " nodes";
ASSERT_EQ((int)query_node_res->actual_sample_size, (int)ids.size());
int64_t *gpu_node_res = new int64_t[ids.size()];
hipMemcpy(gpu_node_res, query_node_res->val, ids.size() * sizeof(int64_t),
hipMemcpyDeviceToHost);
std::unordered_set<int64_t> cpu_node_set, gpu_node_set;
for (auto x : ids) {
cpu_node_set.insert(x);
}
for (int i = 0; i < (int)query_node_res->actual_sample_size; i++) {
auto x = gpu_node_res[i];
ASSERT_EQ(cpu_node_set.find(x) != cpu_node_set.end(), true);
gpu_node_set.insert(x);
}
VLOG(0) << " cpu_node_size = " << cpu_node_set.size();
VLOG(0) << " gpu_node_size = " << gpu_node_set.size();
ASSERT_EQ(cpu_node_set.size(), gpu_node_set.size());
for (int i = 0; i < 20; i++) {
int st = ids.size() / 20 * i;
auto q = g.query_node_list(0, st, ids.size() / 20);
VLOG(0) << " the " << i << "th iteration size = " << q->actual_sample_size;
}
// NodeQueryResult *query_node_list(int gpu_id, int start, int query_size);
*/
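  // Copy the full id list onto each device so every worker thread samples from a GPU-resident copy.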
for (int i = 0; i < gpu_num1; i++) {
platform::CUDADeviceGuard guard(device_id_mapping[i]);
hipMalloc((void **)&key[i], ids.size() * sizeof(int64_t));
hipMemcpy(key[i], ids.data(), ids.size() * sizeof(int64_t),
hipMemcpyHostToDevice);
}
/*
hipMalloc((void **)&key, ids.size() * sizeof(int64_t));
hipMemcpy(key, ids.data(), ids.size() * sizeof(int64_t),
hipMemcpyHostToDevice);
*/
/*
std::vector<std::vector<NeighborSampleResult *>> res(gpu_num1);
for (int i = 0; i < gpu_num1; i++) {
int st = 0;
int size = ids.size();
NeighborSampleResult *result = new NeighborSampleResult(sample_size, size);
platform::CUDAPlace place = platform::CUDAPlace(device_id_mapping[i]);
platform::CUDADeviceGuard guard(device_id_mapping[i]);
hipMalloc((void **)&result->val, size * sample_size * sizeof(int64_t));
hipMalloc((void **)&result->actual_sample_size, size * sizeof(int));
res[i].push_back(result);
}
*/
// g.graph_neighbor_sample
start = 0;
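  // Benchmark v1: one host thread per GPU, sampling neighbors in fixed_key_size batches for exe_count rounds.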
auto func = [&rwlock, &g, &start, &ids](int i) {
int st = 0;
int size = ids.size();
for (int k = 0; k < exe_count; k++) {
st = 0;
while (st < size) {
        int len = std::min(fixed_key_size, (int)ids.size() - st);
auto r = g.graph_neighbor_sample(i, (int64_t *)(key[i] + st),
sample_size, len);
st += len;
delete r;
}
}
};
auto start1 = std::chrono::steady_clock::now();
std::thread thr[gpu_num1];
for (int i = 0; i < gpu_num1; i++) {
thr[i] = std::thread(func, i);
}
for (int i = 0; i < gpu_num1; i++) thr[i].join();
auto end1 = std::chrono::steady_clock::now();
auto tt =
std::chrono::duration_cast<std::chrono::microseconds>(end1 - start1);
std::cerr << "total time cost without cache for v1 is "
<< tt.count() / exe_count / gpu_num1 << " us" << std::endl;
// g.graph_neighbor_sample_v2
start = 0;
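  // Benchmark v2: same batching as v1, but going through graph_neighbor_sample_v2.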
auto func2 = [&rwlock, &g, &start, &ids](int i) {
int st = 0;
int size = ids.size();
for (int k = 0; k < exe_count; k++) {
st = 0;
while (st < size) {
        int len = std::min(fixed_key_size, (int)ids.size() - st);
auto r = g.graph_neighbor_sample_v2(i, (int64_t *)(key[i] + st),
sample_size, len, false);
st += len;
delete r;
}
}
};
auto start2 = std::chrono::steady_clock::now();
std::thread thr2[gpu_num1];
for (int i = 0; i < gpu_num1; i++) {
thr2[i] = std::thread(func2, i);
}
for (int i = 0; i < gpu_num1; i++) thr2[i].join();
auto end2 = std::chrono::steady_clock::now();
auto tt2 =
std::chrono::duration_cast<std::chrono::microseconds>(end2 - start2);
std::cerr << "total time cost without cache for v2 is "
<< tt2.count() / exe_count / gpu_num1 << " us" << std::endl;
for (int i = 0; i < gpu_num1; i++) {
hipFree(key[i]);
}
#endif
}
TEST(TEST_FLEET, sample_rate) { testSampleRate(); }
int main(int argc, char *argv[]) {
for (int i = 0; i < argc; i++)
VLOG(0) << "Argument " << i << " is " << std::string(argv[i]);
if (argc > 1) {
input_file = argv[1];
} else {
prepare_file(edge_file_name, edges);
input_file = edge_file_name;
}
VLOG(0) << "input_file is " << input_file;
if (argc > 2) {
fixed_key_size = std::stoi(argv[2]);
}
VLOG(0) << "sample_node_size for every batch is " << fixed_key_size;
if (argc > 3) {
sample_size = std::stoi(argv[3]);
}
VLOG(0) << "sample_size neighbor_size is " << sample_size;
if (argc > 4) init_search_size = std::stoi(argv[4]);
VLOG(0) << " init_search_size " << init_search_size;
if (argc > 5) {
gpu_str = argv[5];
}
VLOG(0) << " gpu_str= " << gpu_str;
gpu_num = 0;
if (argc > 6) gpu_num1 = std::stoi(argv[6]);
VLOG(0) << " gpu_thread_num= " << gpu_num1;
if (argc > 7) use_nv = std::stoi(argv[7]);
VLOG(0) << " use_nv " << use_nv;
testSampleRate();
}
|
57739ed5d7ede90292f36707731a7910e19628ca.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <unistd.h>
#include <condition_variable> // NOLINT
#include <fstream>
#include <iomanip>
#include <string>
#include <thread> // NOLINT
#include <unordered_set>
#include <vector>
#include "google/protobuf/text_format.h"
#include <chrono>
#include "gtest/gtest.h"
#include "paddle/fluid/distributed/ps.pb.h"
#include "paddle/fluid/distributed/ps/service/env.h"
#include "paddle/fluid/distributed/ps/service/sendrecv.pb.h"
#include "paddle/fluid/distributed/ps/table/common_graph_table.h"
#include "paddle/fluid/distributed/ps/table/graph/graph_node.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/printf.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_sampler.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using namespace paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;
std::string input_file;
int exe_count = 100;
int use_nv = 1;
int fixed_key_size = 50000, sample_size = 32,
bfs_sample_nodes_in_each_shard = 10000, init_search_size = 1,
bfs_sample_edges = 20, gpu_num1 = 8, gpu_num = 8;
std::string gpu_str = "0,1,2,3,4,5,6,7";
int64_t *key[8];
std::vector<std::string> edges = {
std::string("37\t45\t0.34"), std::string("37\t145\t0.31"),
std::string("37\t112\t0.21"), std::string("96\t48\t1.4"),
std::string("96\t247\t0.31"), std::string("96\t111\t1.21"),
std::string("59\t45\t0.34"), std::string("59\t145\t0.31"),
std::string("59\t122\t0.21"), std::string("97\t48\t0.34"),
std::string("97\t247\t0.31"), std::string("97\t111\t0.21")};
// odd id:96 48 122 112
char edge_file_name[] = "test_edges.txt";
void prepare_file(char file_name[], std::vector<std::string> data) {
std::ofstream ofile;
ofile.open(file_name);
for (auto x : data) {
ofile << x << std::endl;
}
ofile.close();
}
void testSampleRate() {
#ifdef PADDLE_WITH_HETERPS
std::vector<int64_t> ids;
int start = 0;
pthread_rwlock_t rwlock;
pthread_rwlock_init(&rwlock, NULL);
{
::paddle::distributed::GraphParameter table_proto;
// table_proto.set_gpups_mode(false);
table_proto.set_shard_num(127);
table_proto.set_task_pool_size(24);
std::cerr << "initializing begin";
distributed::GraphTable graph_table;
graph_table.Initialize(table_proto);
std::cerr << "initializing done";
graph_table.Load(input_file, std::string("e>"));
int sample_actual_size = -1;
int step = fixed_key_size, cur = 0;
while (sample_actual_size != 0) {
std::unique_ptr<char[]> buffer;
graph_table.pull_graph_list(cur, step, buffer, sample_actual_size, false,
1);
int index = 0;
while (index < sample_actual_size) {
paddle::distributed::FeatureNode node;
node.recover_from_buffer(buffer.get() + index);
index += node.get_size(false);
// res.push_back(node);
ids.push_back(node.get_id());
int swap_pos = rand() % ids.size();
std::swap(ids[swap_pos], ids[(int)ids.size() - 1]);
}
cur = ids.size();
// if (sample_actual_size == 0) break;
// char *buff = buffer.get();
// for (int i = 0; i < sample_actual_size/sizeof(int64_t); i++) {
// ids.push_back(*((int64_t *)buff + i));
// int swap_pos = rand() % ids.size();
// std::swap(ids[swap_pos], ids[(int)ids.size() - 1]);
// }
// cur += sample_actual_size/sizeof(int64_t);
}
std::cerr << "load ids done" << std::endl;
std::vector<int64_t> sample_id[10], sample_neighbors[10];
std::vector<int> actual_size[10];
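    // Worker lambda for the CPU baseline: each thread claims up to fixed_key_size ids
    // under the rwlock and samples sample_size neighbors per id from the CPU graph table.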
auto func = [&rwlock, &graph_table, &ids, &sample_id, &actual_size,
&sample_neighbors, &start](int i) {
while (true) {
int s, sn;
bool exit = false;
pthread_rwlock_wrlock(&rwlock);
if (start < ids.size()) {
s = start;
sn = ids.size() - start;
sn = min(sn, fixed_key_size);
start += sn;
} else {
exit = true;
}
pthread_rwlock_unlock(&rwlock);
if (exit) break;
std::vector<std::shared_ptr<char>> buffers(sn);
std::vector<int> ac(sn);
auto status = graph_table.random_sample_neighbors(
ids.data() + s, sample_size, buffers, ac, false);
for (int j = s; j < s + sn; j++) {
sample_id[i].push_back(ids[j]);
actual_size[i].push_back(ac[j - s] / sizeof(int64_t));
int ss = ac[j - s] / sizeof(int64_t);
for (int k = 0; k < ss; k++) {
sample_neighbors[i].push_back(
*((int64_t *)(buffers[j - s].get() + k * sizeof(int64_t))));
}
}
}
VLOG(0) << "func " << i << " returns ";
};
auto start1 = std::chrono::steady_clock::now();
std::thread thr[10];
for (int i = 0; i < 10; i++) {
thr[i] = std::thread(func, i);
}
for (int i = 0; i < 10; i++) thr[i].join();
auto end1 = std::chrono::steady_clock::now();
auto tt =
std::chrono::duration_cast<std::chrono::microseconds>(end1 - start1);
std::cerr << "total time cost without cache is " << tt.count() << " us"
<< std::endl;
int64_t tot = 0;
for (int i = 0; i < 10; i++) {
for (auto x : sample_id[i]) tot += x;
}
VLOG(0) << "sum = " << tot;
}
gpu_num = 0;
int st = 0, u = 0;
std::vector<int> device_id_mapping;
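  // Parse the comma-separated gpu_str into a list of device ids.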
while (u < gpu_str.size()) {
VLOG(0) << u << " " << gpu_str[u];
if (gpu_str[u] == ',') {
auto p = gpu_str.substr(st, u - st);
int id = std::stoi(p);
VLOG(0) << "got a new device id" << id;
device_id_mapping.push_back(id);
st = u + 1;
}
u++;
}
auto p = gpu_str.substr(st, gpu_str.size() - st);
int id = std::stoi(p);
VLOG(0) << "got a new device id" << id;
device_id_mapping.push_back(id);
gpu_num = device_id_mapping.size();
::paddle::distributed::GraphParameter table_proto;
table_proto.set_shard_num(24);
// table_proto.set_gpups_graph_sample_class("CompleteGraphSampler");
std::shared_ptr<HeterPsResource> resource =
std::make_shared<HeterPsResource>(device_id_mapping);
resource->enable_p2p();
GpuPsGraphTable g(resource, use_nv);
g.init_cpu_table(table_proto);
std::vector<std::string> arg;
AllInGpuGraphSampler sampler;
sampler.init(&g, arg);
// g.load(std::string(input_file), std::string("e>"));
// sampler.start(std::string(input_file));
// sampler.load_from_ssd(std::string(input_file));
sampler.start_service(input_file);
/*
NodeQueryResult *query_node_res;
query_node_res = g.query_node_list(0, 0, ids.size() + 10000);
VLOG(0) << "gpu got " << query_node_res->actual_sample_size << " nodes ";
VLOG(0) << "cpu got " << ids.size() << " nodes";
ASSERT_EQ((int)query_node_res->actual_sample_size, (int)ids.size());
int64_t *gpu_node_res = new int64_t[ids.size()];
cudaMemcpy(gpu_node_res, query_node_res->val, ids.size() * sizeof(int64_t),
cudaMemcpyDeviceToHost);
std::unordered_set<int64_t> cpu_node_set, gpu_node_set;
for (auto x : ids) {
cpu_node_set.insert(x);
}
for (int i = 0; i < (int)query_node_res->actual_sample_size; i++) {
auto x = gpu_node_res[i];
ASSERT_EQ(cpu_node_set.find(x) != cpu_node_set.end(), true);
gpu_node_set.insert(x);
}
VLOG(0) << " cpu_node_size = " << cpu_node_set.size();
VLOG(0) << " gpu_node_size = " << gpu_node_set.size();
ASSERT_EQ(cpu_node_set.size(), gpu_node_set.size());
for (int i = 0; i < 20; i++) {
int st = ids.size() / 20 * i;
auto q = g.query_node_list(0, st, ids.size() / 20);
VLOG(0) << " the " << i << "th iteration size = " << q->actual_sample_size;
}
// NodeQueryResult *query_node_list(int gpu_id, int start, int query_size);
*/
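  // Copy the full id list onto each device so every worker thread samples from a GPU-resident copy.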
for (int i = 0; i < gpu_num1; i++) {
platform::CUDADeviceGuard guard(device_id_mapping[i]);
cudaMalloc((void **)&key[i], ids.size() * sizeof(int64_t));
cudaMemcpy(key[i], ids.data(), ids.size() * sizeof(int64_t),
cudaMemcpyHostToDevice);
}
/*
cudaMalloc((void **)&key, ids.size() * sizeof(int64_t));
cudaMemcpy(key, ids.data(), ids.size() * sizeof(int64_t),
cudaMemcpyHostToDevice);
*/
/*
std::vector<std::vector<NeighborSampleResult *>> res(gpu_num1);
for (int i = 0; i < gpu_num1; i++) {
int st = 0;
int size = ids.size();
NeighborSampleResult *result = new NeighborSampleResult(sample_size, size);
platform::CUDAPlace place = platform::CUDAPlace(device_id_mapping[i]);
platform::CUDADeviceGuard guard(device_id_mapping[i]);
cudaMalloc((void **)&result->val, size * sample_size * sizeof(int64_t));
cudaMalloc((void **)&result->actual_sample_size, size * sizeof(int));
res[i].push_back(result);
}
*/
// g.graph_neighbor_sample
start = 0;
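  // Benchmark v1: one host thread per GPU, sampling neighbors in fixed_key_size batches for exe_count rounds.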
auto func = [&rwlock, &g, &start, &ids](int i) {
int st = 0;
int size = ids.size();
for (int k = 0; k < exe_count; k++) {
st = 0;
while (st < size) {
int len = std::min(fixed_key_size, (int)ids.size() - st);
auto r = g.graph_neighbor_sample(i, (int64_t *)(key[i] + st),
sample_size, len);
st += len;
delete r;
}
}
};
auto start1 = std::chrono::steady_clock::now();
std::thread thr[gpu_num1];
for (int i = 0; i < gpu_num1; i++) {
thr[i] = std::thread(func, i);
}
for (int i = 0; i < gpu_num1; i++) thr[i].join();
auto end1 = std::chrono::steady_clock::now();
auto tt =
std::chrono::duration_cast<std::chrono::microseconds>(end1 - start1);
std::cerr << "total time cost without cache for v1 is "
<< tt.count() / exe_count / gpu_num1 << " us" << std::endl;
// g.graph_neighbor_sample_v2
start = 0;
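  // Benchmark v2: same batching as v1, but going through graph_neighbor_sample_v2.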
auto func2 = [&rwlock, &g, &start, &ids](int i) {
int st = 0;
int size = ids.size();
for (int k = 0; k < exe_count; k++) {
st = 0;
while (st < size) {
int len = std::min(fixed_key_size, (int)ids.size() - st);
auto r = g.graph_neighbor_sample_v2(i, (int64_t *)(key[i] + st),
sample_size, len, false);
st += len;
delete r;
}
}
};
auto start2 = std::chrono::steady_clock::now();
std::thread thr2[gpu_num1];
for (int i = 0; i < gpu_num1; i++) {
thr2[i] = std::thread(func2, i);
}
for (int i = 0; i < gpu_num1; i++) thr2[i].join();
auto end2 = std::chrono::steady_clock::now();
auto tt2 =
std::chrono::duration_cast<std::chrono::microseconds>(end2 - start2);
std::cerr << "total time cost without cache for v2 is "
<< tt2.count() / exe_count / gpu_num1 << " us" << std::endl;
for (int i = 0; i < gpu_num1; i++) {
cudaFree(key[i]);
}
#endif
}
TEST(TEST_FLEET, sample_rate) { testSampleRate(); }
int main(int argc, char *argv[]) {
for (int i = 0; i < argc; i++)
VLOG(0) << "Argument " << i << " is " << std::string(argv[i]);
if (argc > 1) {
input_file = argv[1];
} else {
prepare_file(edge_file_name, edges);
input_file = edge_file_name;
}
VLOG(0) << "input_file is " << input_file;
if (argc > 2) {
fixed_key_size = std::stoi(argv[2]);
}
VLOG(0) << "sample_node_size for every batch is " << fixed_key_size;
if (argc > 3) {
sample_size = std::stoi(argv[3]);
}
VLOG(0) << "sample_size neighbor_size is " << sample_size;
if (argc > 4) init_search_size = std::stoi(argv[4]);
VLOG(0) << " init_search_size " << init_search_size;
if (argc > 5) {
gpu_str = argv[5];
}
VLOG(0) << " gpu_str= " << gpu_str;
gpu_num = 0;
if (argc > 6) gpu_num1 = std::stoi(argv[6]);
VLOG(0) << " gpu_thread_num= " << gpu_num1;
if (argc > 7) use_nv = std::stoi(argv[7]);
VLOG(0) << " use_nv " << use_nv;
testSampleRate();
}
|
515da439a7b8c7507c3527a40a8c13025ae289aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>  // for time(), used to seed rand()
//#include <cutil.h>
//#include <mgp.h>
// Includes
//#include <stdio.h>
//#include "../include/ContAcq-IntClk.h"
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 1024
#define NUM_OF_BLOCKS 160
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2( unsigned* A, unsigned* B, int N)
{
int tid = threadIdx.x;
int i = blockDim.x * blockIdx.x + tid;
__device__ __shared__ volatile unsigned sharedInp[THREADS_PER_BLOCK];
__device__ __shared__ volatile unsigned sharedOut[THREADS_PER_BLOCK];
sharedInp[tid] = A[i];
__syncthreads();
unsigned load_value;
volatile unsigned* loadAddr = sharedInp+ tid;
volatile unsigned* storeAddr = sharedOut+ tid;
//unsigned sum_value = 0;
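  // Repeatedly copy the value between the two shared-memory buffers to generate
  // sustained shared-memory load/store traffic for the power measurement.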
#pragma unroll 100
for(unsigned k=0; k<N;k++) {
// __asm volatile(
// "ld.shared.u32 %0, [%1]; \n"
// "st.shared.u32 [%2], %0;"
// : "+r"(load_value) : "l"((loadAddr )) , "l"((storeAddr))
// );
load_value = *loadAddr;
*storeAddr = load_value;
}
B[i] = sharedOut[tid];
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
// Copy vector from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B,iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_B contains the result in host memory
checkCudaErrors( hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
}
// Fills the array with random unsigned entries; seed the generator once rather than per element.
void RandomInit(unsigned* data, int n)
{
  srand((unsigned)time(0));
  for (int i = 0; i < n; ++i){
    data[i] = rand();
  }
}
|
515da439a7b8c7507c3527a40a8c13025ae289aa.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>  // for time(), used to seed rand()
//#include <cutil.h>
//#include <mgp.h>
// Includes
//#include <stdio.h>
//#include "../include/ContAcq-IntClk.h"
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 1024
#define NUM_OF_BLOCKS 160
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2( unsigned* A, unsigned* B, int N)
{
int tid = threadIdx.x;
int i = blockDim.x * blockIdx.x + tid;
__device__ __shared__ volatile unsigned sharedInp[THREADS_PER_BLOCK];
__device__ __shared__ volatile unsigned sharedOut[THREADS_PER_BLOCK];
sharedInp[tid] = A[i];
__syncthreads();
unsigned load_value;
volatile unsigned* loadAddr = sharedInp+ tid;
volatile unsigned* storeAddr = sharedOut+ tid;
//unsigned sum_value = 0;
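  // Repeatedly copy the value between the two shared-memory buffers to generate
  // sustained shared-memory load/store traffic for the power measurement.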
#pragma unroll 100
for(unsigned k=0; k<N;k++) {
// __asm volatile(
// "ld.shared.u32 %0, [%1]; \n"
// "st.shared.u32 [%2], %0;"
// : "+r"(load_value) : "l"((loadAddr )) , "l"((storeAddr))
// );
load_value = *loadAddr;
*storeAddr = load_value;
}
B[i] = sharedOut[tid];
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
// Copy vector from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B,iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_B contains the result in host memory
checkCudaErrors( cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
}
// Fills the array with random unsigned entries; seed the generator once rather than per element.
void RandomInit(unsigned* data, int n)
{
  srand((unsigned)time(0));
  for (int i = 0; i < n; ++i){
    data[i] = rand();
  }
}
|
e657526b9bdc9bf01984b5f84eba98769726b1e9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------|SMC Change: Add smc.h header|-------------*/
#include "smc.h"
/**
* Naive Example of Matrix Addition
*
*/
/**
 * Matrix addition: C = A + B.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
void constantInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = (float)rand()/RAND_MAX;
}
}
int matrixAdd_gold(float *A, float *B, float*C, int size){
for (int i=0;i<size;i++)
C[i] = A[i] + B[i];
return 0;
}
/**
* Matrix addition (CUDA Kernel) on the device: C = A + B
* w is matrix width, h is matrix height
*/
__global__ void
matrixAddCUDA(float *C, float *A, float *B, int w, int h,dim3 __SMC_orgGridDim, int __SMC_workersNeeded, int *__SMC_workerCount, int * __SMC_newChunkSeq, int * __SMC_seqEnds)
{
/*-------------|SMC Change: Add __SMC_Begin|-------------*/
__SMC_Begin
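    // Derive bx/by from the flat __SMC_chunkID instead of the hardware blockIdx.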
// Block index
int bx = (int)fmodf((float)__SMC_chunkID, (float)__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.x|-------------*/;
int by = (int)(__SMC_chunkID/__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.y|-------------*/;
printf("Referrence of blockIdx.x: %d.\n", (int)fmodf((float)__SMC_chunkID, (float)__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.x|-------------*/);
printf("Referrence of blockIdx.y: %d.\n", (int)(__SMC_chunkID/__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.y|-------------*/);
// Thread local index
int txl = threadIdx.x;
int tyl = threadIdx.y;
// Thread global index
int tx = txl+bx*blockDim.x;
int ty = tyl+by*blockDim.y;
int glbIdx = ty*w+tx;
int maxidx = w*h-1;
if (glbIdx<0 || glbIdx>maxidx){
printf("Error: glbIdx is %d.\n", glbIdx);
}
else{
// Do addition
C[glbIdx] = A[glbIdx] + B[glbIdx];
}
// if (threadIdx.x==0 && threadIdx.y==0){
// printf("bx=%d, by=%d, txl=%d, tyl=%d, glbIdx=%d, A[glbIdx]=%f, B[glbIdx]=%f, C[glbIdx]=%f\n",
// bx, by, txl, tyl, glbIdx, A[glbIdx], B[glbIdx], C[glbIdx]);
// }
/*-------------|SMC Change: Add __SMC_End|-------------*/
__SMC_End
}
/**
* A wrapper that calls the GPU kernel
*/
int matrixAdd(int block_size, int w, int h)
{
// Allocate host memory for matrices A and B
unsigned int sz = w*h;
unsigned int mem_size = sizeof(float) * sz;
float *h_A = (float *)malloc(mem_size);
float *h_B = (float *)malloc(mem_size);
float *h_C = (float *) malloc(mem_size);
// Initialize host memory
constantInit(h_A, sz);
constantInit(h_B, sz);
// Allocate device memory
float *d_A, *d_B, *d_C;
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size);
error = hipMalloc((void **) &d_B, mem_size);
error = hipMalloc((void **) &d_C, mem_size);
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(w / threads.x, h / threads.y);
/*-------------|SMC Change: Change Grid()|-------------*/
dim3 __SMC_orgGridDim(w / threads.x, h / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
/*-------------|SMC Change: Add __SMC_init()|-------------*/
__SMC_init();
/*-------------|SMC Change: Add arguments to kernel function call|-------------*/
hipLaunchKernelGGL(( matrixAddCUDA), dim3(newGrid), dim3(threads) , 0, 0, d_C, d_A, d_B, w, h, __SMC_orgGridDim, __SMC_workersNeeded, __SMC_workerCount, __SMC_newChunkSeq, __SMC_seqEnds);
printf("done\n");
hipDeviceSynchronize();
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
/* check the result correctness */
float g_sum=0, c_sum=0;
for (int i=0;i<w*h;i++) {
// if (fmod(i,32*w)==0) printf("h_C[%d]=%f\n",i,h_C[i]);
g_sum += h_C[i];
}
matrixAdd_gold(h_A, h_B, h_C, w*h);
for (int i=0;i<w*h;i++) c_sum += h_C[i];
if (abs(g_sum - c_sum)<1e-10){
printf("Pass...\n");
}
else{
printf("Fail: %f vs. %f.\n", g_sum, c_sum);
}
/*-------------|SMC Change: Add __SMC_init()|-------------*/
__SMC_init();
/*-------------|SMC Change: Add arguments to kernel function call|-------------*/
hipLaunchKernelGGL(( matrixAddCUDA), dim3(newGrid), dim3(threads) , 0, 0, d_C, d_A, d_B, w, h, __SMC_orgGridDim, __SMC_workersNeeded, __SMC_workerCount, __SMC_newChunkSeq, __SMC_seqEnds);
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
|
e657526b9bdc9bf01984b5f84eba98769726b1e9.cu
|
/*-------------|SMC Change: Add smc.h header|-------------*/
#include "smc.h"
/**
* Naive Example of Matrix Addition
*
*/
/**
 * Matrix addition: C = A + B.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
void constantInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = (float)rand()/RAND_MAX;
}
}
int matrixAdd_gold(float *A, float *B, float*C, int size){
for (int i=0;i<size;i++)
C[i] = A[i] + B[i];
return 0;
}
/**
* Matrix addition (CUDA Kernel) on the device: C = A + B
* w is matrix width, h is matrix height
*/
__global__ void
matrixAddCUDA(float *C, float *A, float *B, int w, int h,dim3 __SMC_orgGridDim, int __SMC_workersNeeded, int *__SMC_workerCount, int * __SMC_newChunkSeq, int * __SMC_seqEnds)
{
/*-------------|SMC Change: Add __SMC_Begin|-------------*/
__SMC_Begin
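    // Derive bx/by from the flat __SMC_chunkID instead of the hardware blockIdx.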
// Block index
int bx = (int)fmodf((float)__SMC_chunkID, (float)__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.x|-------------*/;
int by = (int)(__SMC_chunkID/__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.y|-------------*/;
printf("Referrence of blockIdx.x: %d.\n", (int)fmodf((float)__SMC_chunkID, (float)__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.x|-------------*/);
printf("Referrence of blockIdx.y: %d.\n", (int)(__SMC_chunkID/__SMC_orgGridDim.x)/*-------------|SMC Change: Replace blockIdx.y|-------------*/);
// Thread local index
int txl = threadIdx.x;
int tyl = threadIdx.y;
// Thread global index
int tx = txl+bx*blockDim.x;
int ty = tyl+by*blockDim.y;
int glbIdx = ty*w+tx;
int maxidx = w*h-1;
if (glbIdx<0 || glbIdx>maxidx){
printf("Error: glbIdx is %d.\n", glbIdx);
}
else{
// Do addition
C[glbIdx] = A[glbIdx] + B[glbIdx];
}
// if (threadIdx.x==0 && threadIdx.y==0){
// printf("bx=%d, by=%d, txl=%d, tyl=%d, glbIdx=%d, A[glbIdx]=%f, B[glbIdx]=%f, C[glbIdx]=%f\n",
// bx, by, txl, tyl, glbIdx, A[glbIdx], B[glbIdx], C[glbIdx]);
// }
/*-------------|SMC Change: Add __SMC_End|-------------*/
__SMC_End
}
/**
* A wrapper that calls the GPU kernel
*/
int matrixAdd(int block_size, int w, int h)
{
// Allocate host memory for matrices A and B
unsigned int sz = w*h;
unsigned int mem_size = sizeof(float) * sz;
float *h_A = (float *)malloc(mem_size);
float *h_B = (float *)malloc(mem_size);
float *h_C = (float *) malloc(mem_size);
// Initialize host memory
constantInit(h_A, sz);
constantInit(h_B, sz);
// Allocate device memory
float *d_A, *d_B, *d_C;
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size);
error = cudaMalloc((void **) &d_B, mem_size);
error = cudaMalloc((void **) &d_C, mem_size);
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(w / threads.x, h / threads.y);
/*-------------|SMC Change: Change Grid()|-------------*/
dim3 __SMC_orgGridDim(w / threads.x, h / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
/*-------------|SMC Change: Add __SMC_init()|-------------*/
__SMC_init();
/*-------------|SMC Change: Add arguments to kernel function call|-------------*/
matrixAddCUDA<<< newGrid, threads >>>(d_C, d_A, d_B, w, h, __SMC_orgGridDim, __SMC_workersNeeded, __SMC_workerCount, __SMC_newChunkSeq, __SMC_seqEnds);
printf("done\n");
cudaDeviceSynchronize();
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
/* check the result correctness */
float g_sum=0, c_sum=0;
for (int i=0;i<w*h;i++) {
// if (fmod(i,32*w)==0) printf("h_C[%d]=%f\n",i,h_C[i]);
g_sum += h_C[i];
}
matrixAdd_gold(h_A, h_B, h_C, w*h);
for (int i=0;i<w*h;i++) c_sum += h_C[i];
if (abs(g_sum - c_sum)<1e-10){
printf("Pass...\n");
}
else{
printf("Fail: %f vs. %f.\n", g_sum, c_sum);
}
/*-------------|SMC Change: Add __SMC_init()|-------------*/
__SMC_init();
/*-------------|SMC Change: Add arguments to kernel function call|-------------*/
matrixAddCUDA<<< newGrid, threads >>>(d_C, d_A, d_B, w, h, __SMC_orgGridDim, __SMC_workersNeeded, __SMC_workerCount, __SMC_newChunkSeq, __SMC_seqEnds);
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
5cbca577d99e569a75ff49e16cdecc0e591e34b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "common/mnist_reader.hpp"
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include "kernel.h"
#define BLOCK_SIZE 16
#define N2 128
__device__ float sigmoid(float x)
{
float exp_value;
float return_value;
/*** Exponential calculation ***/
exp_value = exp((float) -x);
/*** Final sigmoid value ***/
return_value = 1 / (1 + exp_value);
return return_value;
}
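// Backprop part 1: compute output-layer deltas (theta3) from the label, then hidden-layer deltas (theta2).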
__global__ void back_propagation_1(float* d_layer3, float* d_layer2, float* d_theta3,
float* d_theta2, float* d_w2, int label, int n2) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float d_theta3_shared[10];
if (index < 10) {
float expected = (index == label) ? 1.0 : 0.0;
float d_layer3_temp = d_layer3[index];
d_theta3_shared[index] = d_layer3_temp * (1 - d_layer3_temp) * (expected - d_layer3_temp);
d_theta3[index] = d_theta3_shared[index];
}
__syncthreads();
if (index < n2) {
float sum = 0.0;
for (int j = 0; j < 10; j++) {
// --- Column major order
sum += d_w2[index * 10 + j] * d_theta3_shared[j];
// --- Row major order
//sum += d_w2[j * n2 + index] * d_theta3[j];
}
float d_layer2_temp = d_layer2[index];
d_theta2[index] = d_layer2_temp * (1 - d_layer2_temp) * sum;
}
}
__global__ void back_propagation_2_w2(float* d_w2, float* d_delta2, float* d_theta3, float* d_layer2,
float learning_rate, float momentum, int n2) {
// --- Column major order
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
float temp;
// int idx = threadIdx.y * blockDim.x + threadIdx.x;
// __shared__ float d_theta3_shared[10];
// __shared__ float d_layer2_shared[N2];
// if (idx<10)
// d_theta3_shared[idx] = d_theta3[idx];
// if (idx<n2)
// d_layer2_shared[idx] = d_layer2[idx];
//
// __syncthreads();
if (j < 10 && i < n2) {
temp = (learning_rate * d_theta3[j] * d_layer2[i]) + (momentum * d_delta2[i * 10 + j]);
d_delta2[i * 10 + j] = temp;
d_w2[i * 10 + j] += temp;
}
// --- Row major order
/*
int j = blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (j < 10 && i < n2) {
d_delta2[j * n2 + i] = (learning_rate * d_theta3[j] * d_layer2[i]) + (momentum * d_delta2[j * n2 + i]);
d_w2[j * n2 + i] += d_delta2[j * n2 + i];
}
*/
}
__global__ void back_propagation_2_w1(float* d_w1, float* d_delta1, float* d_theta2, float* d_layer1,
float learning_rate, float momentum, int n2) {
// --- Column major order
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
float temp;
if (j < n2 && i < 784) {
temp = (learning_rate * d_theta2[j] * d_layer1[i]) + (momentum * d_delta1[i * n2 + j]);
d_delta1[i * n2 + j] = temp;
d_w1[i * n2 + j] += temp;
}
// --- Row major order
/*
int j = blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (j < n2 && i < 784) {
d_delta1[j * 784 + i] = (learning_rate * d_theta2[j] * d_layer1[i]) + (momentum * d_delta1[j * 784 + i]);
d_w1[j * 784 + i] += d_delta1[j * 784 + i];
}
*/
}
__global__ void perceptron(float* d_layer1, float* d_layer2, float* d_layer3, float* d_w1,
float* d_w2, int n2) {
__shared__ float x_shared[BLOCK_SIZE];
int index = blockIdx.x * blockDim.x + threadIdx.x;
float y_val = 0.0;
/**
if (index < n2) {
float y_val = 0;
for (int i = 0; i < 784; i++) {
y_val += d_layer1[i] * d_w1[index * 784 + i];
}
d_layer2[index] = 1.0 / (1.0 + exp(-1.0 * y_val));
}
**/
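    // Hidden layer: tiled matrix-vector product, staging BLOCK_SIZE inputs in shared memory per tile.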
#pragma unroll
for (unsigned int m = 0; m < ceil(784*1.0/BLOCK_SIZE); ++m)
{
if ((m * BLOCK_SIZE + threadIdx.x) < 784){
x_shared[threadIdx.x] = d_layer1[threadIdx.x + m * BLOCK_SIZE];
} else{
x_shared[threadIdx.x] = 0.0;
}
__syncthreads();
#pragma unroll
for (unsigned int e = 0; e < BLOCK_SIZE; ++e) {
// --- Column-major ordering - faster lower Accuracy
y_val += d_w1[index + (e + BLOCK_SIZE * m) * 128] * x_shared[e];
// --- Row-major ordering - slower higher Accuracy
//y_val += d_w1[index * 784 + (e + BLOCK_SIZE * m)] * x_shared[e];
}
__syncthreads();
}
if (index < n2) d_layer2[index] = 1.0 / (1.0 + exp(-1.0 * y_val));
__syncthreads();
// pass through third layer
if (index < 10) {
float val2 = 0.0;
for (int i = 0; i < n2; i++) {
// --- Column-major ordering
val2 += d_layer2[i] * d_w2[i * 10 + index];
// --- Row-major ordering
//val2 += d_layer2[i] * d_w2[index * n2 + i];
}
val2 = 1.0 / (1.0 + exp(-1.0 * val2));
d_layer3[index] = val2;
}
}
void wrapper(mnist::MNIST_dataset<std::vector, std::vector<uint8_t>, uint8_t> dataset) {
int n2;
int label;
float learning_rate;
float momentum;
float* layer1;
float* layer2;
float* layer3;
float* w1;
float* w2;
float* delta2;
float* delta1;
float* d_layer1;
float* d_layer2;
float* d_layer3;
float* d_w1;
float* d_w2;
float* d_theta3;
float* d_theta2;
float* d_delta2;
float* d_delta1;
float* training_images;
float* test_images;
// set the number of nodes in hidden layer
n2 = 128;
learning_rate = 5e-2;
momentum = 0.9;
// Neural Network Architecture 3 layers: 784 -> n2 -> 10
// allocate GPU memory
hipMalloc((void **)&d_layer1, 784 * sizeof(float));
hipMalloc((void **)&d_layer2, n2 * sizeof(float));
hipMalloc((void **)&d_layer3, 10 * sizeof(float));
hipMalloc((void **)&d_w1, 784 * n2 * sizeof(float));
hipMalloc((void **)&d_w2, n2 * 10 * sizeof(float));
hipMalloc((void **)&d_delta1, 784 * n2 * sizeof(float));
hipMalloc((void **)&d_delta2, n2 * 10 * sizeof(float));
hipMalloc((void **)&d_theta2, n2 * sizeof(float));
hipMalloc((void **)&d_theta3, 10 * sizeof(float));
// allocate host memory
layer1 = (float *)malloc(784 * sizeof(float));
layer2 = (float *)malloc(n2 * sizeof(float));
layer3 = (float *)malloc(10 * sizeof(float));
w1 = (float *)malloc(784 * n2 * sizeof(float));
w2 = (float *)malloc(n2 * 10 * sizeof(float));
delta1 = (float *)malloc(784 * n2 * sizeof(float));
delta2 = (float *)malloc(n2 * 10 * sizeof(float));
training_images = (float *)malloc(60000 * 784 * sizeof(float));
test_images = (float *)malloc(10000 * 784 * sizeof(float));
// hipHostMalloc((void **)&layer1, 784 * sizeof(float),hipHostMallocPortable);
// hipHostMalloc((void **)&layer2, n2 * sizeof(float),hipHostMallocPortable);
// hipHostMalloc((void **)&layer3, 10 * sizeof(float),hipHostMallocPortable);
// hipHostMalloc((void **)&w1, 784 * n2 * sizeof(float),hipHostMallocPortable);
// hipHostMalloc((void **)&w2, n2 * 10 * sizeof(float),hipHostMallocPortable);
// hipHostMalloc((void **)&delta1, 784 * n2 * sizeof(float),hipHostMallocPortable);
// hipHostMalloc((void **)&delta2, n2 * 10 * sizeof(float),hipHostMallocPortable);
// hipHostMalloc((void **)&training_images, 60000 * 784 * sizeof(float),hipHostMallocDefault);
// hipHostMalloc((void **)&test_images, 10000 * 784 * sizeof(float),hipHostMallocPortable);
    // initialize weights to small random values in [-0.5, 0.5] and deltas to 0
for (int i = 0; i < n2; i++) {
for (int j = 0; j < 784; j++) {
int sign = rand() % 2;
/*
// Row major ordering w1: (n2 x 784)
w1[i * 784 + j] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w1[i * 784 + j] = -1 * w1[i * 784 + j];
}
delta1[i * 784 + j] = 0.0;
*/
// Column major ordering w1: (784 x n2)
w1[j * n2 + i] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w1[j * n2 + i] = -1 * w1[j * n2 + i];
}
delta1[j * n2 + i] = 0.0;
}
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < n2; j++) {
int sign = rand() % 2;
/*
// Row major ordering w2: (10 x n2)
w2[i * n2 + j] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w2[i * n2 + j] = -1 * w2[i * n2 + j];
}
delta2[i * n2 + j] = 0.0;
*/
// Column major ordering w2: (n2 x 10)
w2[j * 10 + i] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w2[j * 10 + i] = -1 * w2[j * 10 + i];
}
delta2[j * 10 + i] = 0.0;
}
}
// convert training and test data to float
for (int i = 0; i < 60000; i++) {
for (int j = 0; j < 784; j++) {
//training_images[i * 784 + j] = (float)dataset.training_images[i][j];
training_images[i * 784 + j] = dataset.training_images[i][j] == 0 ? 0.0 : 1.0;
}
}
for (int i = 0; i < 10000; i++) {
for (int j = 0; j < 784; j++) {
//test_images[i * 784 + j] = (float)dataset.test_images[i][j];
test_images[i * 784 + j] = dataset.test_images[i][j] == 0 ? 0.0 : 1.0;
}
}
// Copy necessary host memory to GPU memory
hipMemcpy(d_w1, w1, 784 * n2 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w2, w2, n2 * 10 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_delta1, delta1, 784 * n2 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_delta2, delta2, n2 * 10 * sizeof(float), hipMemcpyHostToDevice);
// Set the block and grid dim
    // For forward process: n2 (=128) threads per image, split into blocks of BLOCK_SIZE; the first 10 threads also compute the output layer.
dim3 DimGridF(ceil(128*1.0/BLOCK_SIZE), 1, 1);
dim3 DimBlockF(BLOCK_SIZE, 1, 1);
dim3 DimGridB_1(1, 1, 1);
dim3 DimBlockB_1(n2, 1, 1);
/*
// Row major order weight matrices
dim3 DimGridB_2_w2(ceil(n2/10.0), 1, 1);
dim3 DimBlockB_2_w2(10, 10, 1);
dim3 DimGridB_2_w1(ceil(784*1.0/BLOCK_SIZE), ceil(n2*1.0/BLOCK_SIZE), 1);
dim3 DimBlockB_2_w1(BLOCK_SIZE, BLOCK_SIZE, 1);
*/
// Column major order weight matrices
dim3 DimGridB_2_w2(1, ceil(n2/10.0), 1);
dim3 DimBlockB_2_w2(10, 10, 1);
dim3 DimGridB_2_w1(ceil(n2*1.0/BLOCK_SIZE), ceil(784*1.0/BLOCK_SIZE), 1);
dim3 DimBlockB_2_w1(BLOCK_SIZE, BLOCK_SIZE, 1);
float ms;
hipEvent_t start,stop;
// hipStream_t stream1;
// hipStream_t stream2;
// hipStreamCreate(&stream1);
// hipStreamCreate(&stream2);
// float * d_train;
// float * temp = d_layer1;
// hipMalloc((void **)&d_train, 60000 * 784 * sizeof(float));
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// hipMemcpy(d_train, training_images, 60000 * 784 * sizeof(float), hipMemcpyHostToDevice);
// Begin training
for(int epochs = 0; epochs < 2; epochs++) {
for (int i = 0; i < dataset.training_images.size(); i++) {
label = static_cast<int>(dataset.training_labels[i]);
// Performing forward process
hipMemcpy(d_layer1, &(training_images[i * 784]), 784 * sizeof(float), hipMemcpyHostToDevice);
// d_layer1 = d_train + i * 784;
hipLaunchKernelGGL(( perceptron), dim3(DimGridF), dim3(DimBlockF), 0, 0, d_layer1, d_layer2, d_layer3, d_w1, d_w2, n2);
hipDeviceSynchronize();
/*
// Checking layer1, layer2, layer3 values
if (i == 0) {
hipMemcpy(layer1, d_layer1, 784 * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(layer2, d_layer2, n2 * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(layer3, d_layer3, 10 * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "printing layer1: " << std::endl;
for (int j = 0; j < 784; j++) {
std::cout << layer1[j] << " ";
}
std::cout << std::endl;
std::cout << "printing layer2: " << std::endl;
for (int j = 0; j < n2; j++) {
std::cout << layer2[j] << " ";
}
std::cout << std::endl;
std::cout << "printing layer3: " << std::endl;
for (int j = 0; j < 10; j++) {
std::cout << layer3[j] << " ";
}
std::cout << std::endl;
}
*/
// Performing backpropagation (2 parts)
// part 1
hipLaunchKernelGGL(( back_propagation_1), dim3(DimGridB_1), dim3(DimBlockB_1), 0, 0, d_layer3, d_layer2, d_theta3, d_theta2, d_w2, label, n2);
hipDeviceSynchronize();
// part 2
hipLaunchKernelGGL(( back_propagation_2_w2), dim3(DimGridB_2_w2), dim3(DimBlockB_2_w2), 0, 0, d_w2, d_delta2, d_theta3, d_layer2, learning_rate, momentum, n2);
hipLaunchKernelGGL(( back_propagation_2_w1), dim3(DimGridB_2_w1), dim3(DimBlockB_2_w1), 0, 0, d_w1, d_delta1, d_theta2, d_layer1, learning_rate, momentum, n2);
hipDeviceSynchronize();
/*
// Checking w1 and w2 values
if (i == 0) {
hipMemcpy(w1, d_w1, 784 * n2 * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(w2, d_w2, n2 * 10 * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "printing w1: " << std::endl;
for (int j = 0; j < n2; j++) {
for (int k = 0; k < 784; k++) {
std::cout << w1[j * 784 + k] << " ";
}
std::cout << std::endl;
}
std::cout << "printing w2: " << std::endl;
for (int j = 0; j < 10; j++) {
for (int k = 0; k < n2; k++) {
std::cout << w2[j * n2 + k] << " ";
}
std::cout << std::endl;
}
}
*/
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
std::cout << "GPU execution time: " << ms << std::endl;
// d_layer1 =temp;
// Copy weights to host
/*
hipMemcpy(w1, d_w1, 784 * n2 * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(w2, d_w2, n2 * 10 * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "printing w1: " << std::endl;
for (int i = 0; i < n2; i++) {
for (int j = 0; j < 784; j++) {
std::cout << w1[i * 784 + j] << " ";
}
std::cout << std::endl;
}
std::cout << "printing w2: " << std::endl;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < n2; j++) {
std::cout << w2[i * n2 + j] << " ";
}
std::cout << std::endl;
}
*/
// Testing
int prediction = 0;
int numCorrect = 0;
for (int i = 0; i < dataset.test_images.size(); i++) {
label = static_cast<int>(dataset.test_labels[i]);
hipMemcpy(d_layer1, &(test_images[i * 784]), 784 * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( perceptron), dim3(DimGridF), dim3(DimBlockF), 0, 0, d_layer1, d_layer2, d_layer3, d_w1, d_w2, n2);
hipDeviceSynchronize();
hipMemcpy(layer3, d_layer3, 10 * sizeof(float), hipMemcpyDeviceToHost);
for (int j = 0; j < 10; j++) {
if (layer3[j] > layer3[prediction]) {
prediction = j;
}
}
if (prediction == label) {
numCorrect++;
}
}
std::cout << "numCorrect: " << numCorrect << std::endl;
std::cout << "test size: " << dataset.test_images.size() << std::endl;
std::cout << "Accuracy: " << numCorrect / (1.0 * dataset.test_images.size()) << std::endl;
// free memory
free(layer1);
free(layer2);
free(layer3);
free(w1);
free(w2);
free(delta1);
free(delta2);
free(training_images);
free(test_images);
// hipHostFree(layer1);
// hipHostFree(layer2);
// hipHostFree(layer3);
// hipHostFree(w1);
// hipHostFree(w2);
// hipHostFree(delta1);
// hipHostFree(delta2);
// hipHostFree(training_images);
// hipHostFree(test_images);
hipFree(d_layer1);
hipFree(d_layer2);
hipFree(d_layer3);
hipFree(d_w1);
hipFree(d_w2);
hipFree(d_theta3);
hipFree(d_theta2);
hipFree(d_delta2);
hipFree(d_delta1);
}
|
5cbca577d99e569a75ff49e16cdecc0e591e34b7.cu
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "common/mnist_reader.hpp"
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include "kernel.h"
#define BLOCK_SIZE 16
#define N2 128
__device__ float sigmoid(float x)
{
float exp_value;
float return_value;
/*** Exponential calculation ***/
exp_value = exp((float) -x);
/*** Final sigmoid value ***/
return_value = 1 / (1 + exp_value);
return return_value;
}
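// Backprop part 1: compute output-layer deltas (theta3) from the label, then hidden-layer deltas (theta2).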
__global__ void back_propagation_1(float* d_layer3, float* d_layer2, float* d_theta3,
float* d_theta2, float* d_w2, int label, int n2) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float d_theta3_shared[10];
if (index < 10) {
float expected = (index == label) ? 1.0 : 0.0;
float d_layer3_temp = d_layer3[index];
d_theta3_shared[index] = d_layer3_temp * (1 - d_layer3_temp) * (expected - d_layer3_temp);
d_theta3[index] = d_theta3_shared[index];
}
__syncthreads();
if (index < n2) {
float sum = 0.0;
for (int j = 0; j < 10; j++) {
// --- Column major order
sum += d_w2[index * 10 + j] * d_theta3_shared[j];
// --- Row major order
//sum += d_w2[j * n2 + index] * d_theta3[j];
}
float d_layer2_temp = d_layer2[index];
d_theta2[index] = d_layer2_temp * (1 - d_layer2_temp) * sum;
}
}
__global__ void back_propagation_2_w2(float* d_w2, float* d_delta2, float* d_theta3, float* d_layer2,
float learning_rate, float momentum, int n2) {
// --- Column major order
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
float temp;
// int idx = threadIdx.y * blockDim.x + threadIdx.x;
// __shared__ float d_theta3_shared[10];
// __shared__ float d_layer2_shared[N2];
// if (idx<10)
// d_theta3_shared[idx] = d_theta3[idx];
// if (idx<n2)
// d_layer2_shared[idx] = d_layer2[idx];
//
// __syncthreads();
if (j < 10 && i < n2) {
temp = (learning_rate * d_theta3[j] * d_layer2[i]) + (momentum * d_delta2[i * 10 + j]);
d_delta2[i * 10 + j] = temp;
d_w2[i * 10 + j] += temp;
}
// --- Row major order
/*
int j = blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (j < 10 && i < n2) {
d_delta2[j * n2 + i] = (learning_rate * d_theta3[j] * d_layer2[i]) + (momentum * d_delta2[j * n2 + i]);
d_w2[j * n2 + i] += d_delta2[j * n2 + i];
}
*/
}
__global__ void back_propagation_2_w1(float* d_w1, float* d_delta1, float* d_theta2, float* d_layer1,
float learning_rate, float momentum, int n2) {
// --- Column major order
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
float temp;
if (j < n2 && i < 784) {
temp = (learning_rate * d_theta2[j] * d_layer1[i]) + (momentum * d_delta1[i * n2 + j]);
d_delta1[i * n2 + j] = temp;
d_w1[i * n2 + j] += temp;
}
// --- Row major order
/*
int j = blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (j < n2 && i < 784) {
d_delta1[j * 784 + i] = (learning_rate * d_theta2[j] * d_layer1[i]) + (momentum * d_delta1[j * 784 + i]);
d_w1[j * 784 + i] += d_delta1[j * 784 + i];
}
*/
}
__global__ void perceptron(float* d_layer1, float* d_layer2, float* d_layer3, float* d_w1,
float* d_w2, int n2) {
__shared__ float x_shared[BLOCK_SIZE];
int index = blockIdx.x * blockDim.x + threadIdx.x;
float y_val = 0.0;
/**
if (index < n2) {
float y_val = 0;
for (int i = 0; i < 784; i++) {
y_val += d_layer1[i] * d_w1[index * 784 + i];
}
d_layer2[index] = 1.0 / (1.0 + exp(-1.0 * y_val));
}
**/
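    // Hidden layer: tiled matrix-vector product, staging BLOCK_SIZE inputs in shared memory per tile.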
#pragma unroll
for (unsigned int m = 0; m < ceil(784*1.0/BLOCK_SIZE); ++m)
{
if ((m * BLOCK_SIZE + threadIdx.x) < 784){
x_shared[threadIdx.x] = d_layer1[threadIdx.x + m * BLOCK_SIZE];
} else{
x_shared[threadIdx.x] = 0.0;
}
__syncthreads();
#pragma unroll
for (unsigned int e = 0; e < BLOCK_SIZE; ++e) {
// --- Column-major ordering - faster lower Accuracy
y_val += d_w1[index + (e + BLOCK_SIZE * m) * 128] * x_shared[e];
// --- Row-major ordering - slower higher Accuracy
//y_val += d_w1[index * 784 + (e + BLOCK_SIZE * m)] * x_shared[e];
}
__syncthreads();
}
if (index < n2) d_layer2[index] = 1.0 / (1.0 + exp(-1.0 * y_val));
__syncthreads();
// pass through third layer
if (index < 10) {
float val2 = 0.0;
for (int i = 0; i < n2; i++) {
// --- Column-major ordering
val2 += d_layer2[i] * d_w2[i * 10 + index];
// --- Row-major ordering
//val2 += d_layer2[i] * d_w2[index * n2 + i];
}
val2 = 1.0 / (1.0 + exp(-1.0 * val2));
d_layer3[index] = val2;
}
}
void wrapper(mnist::MNIST_dataset<std::vector, std::vector<uint8_t>, uint8_t> dataset) {
int n2;
int label;
float learning_rate;
float momentum;
float* layer1;
float* layer2;
float* layer3;
float* w1;
float* w2;
float* delta2;
float* delta1;
float* d_layer1;
float* d_layer2;
float* d_layer3;
float* d_w1;
float* d_w2;
float* d_theta3;
float* d_theta2;
float* d_delta2;
float* d_delta1;
float* training_images;
float* test_images;
// set the number of nodes in hidden layer
n2 = 128;
learning_rate = 5e-2;
momentum = 0.9;
// Neural Network Architecture 3 layers: 784 -> n2 -> 10
// allocate GPU memory
cudaMalloc((void **)&d_layer1, 784 * sizeof(float));
cudaMalloc((void **)&d_layer2, n2 * sizeof(float));
cudaMalloc((void **)&d_layer3, 10 * sizeof(float));
cudaMalloc((void **)&d_w1, 784 * n2 * sizeof(float));
cudaMalloc((void **)&d_w2, n2 * 10 * sizeof(float));
cudaMalloc((void **)&d_delta1, 784 * n2 * sizeof(float));
cudaMalloc((void **)&d_delta2, n2 * 10 * sizeof(float));
cudaMalloc((void **)&d_theta2, n2 * sizeof(float));
cudaMalloc((void **)&d_theta3, 10 * sizeof(float));
// allocate host memory
layer1 = (float *)malloc(784 * sizeof(float));
layer2 = (float *)malloc(n2 * sizeof(float));
layer3 = (float *)malloc(10 * sizeof(float));
w1 = (float *)malloc(784 * n2 * sizeof(float));
w2 = (float *)malloc(n2 * 10 * sizeof(float));
delta1 = (float *)malloc(784 * n2 * sizeof(float));
delta2 = (float *)malloc(n2 * 10 * sizeof(float));
training_images = (float *)malloc(60000 * 784 * sizeof(float));
test_images = (float *)malloc(10000 * 784 * sizeof(float));
// cudaHostAlloc((void **)&layer1, 784 * sizeof(float),cudaHostAllocPortable);
// cudaHostAlloc((void **)&layer2, n2 * sizeof(float),cudaHostAllocPortable);
// cudaHostAlloc((void **)&layer3, 10 * sizeof(float),cudaHostAllocPortable);
// cudaHostAlloc((void **)&w1, 784 * n2 * sizeof(float),cudaHostAllocPortable);
// cudaHostAlloc((void **)&w2, n2 * 10 * sizeof(float),cudaHostAllocPortable);
// cudaHostAlloc((void **)&delta1, 784 * n2 * sizeof(float),cudaHostAllocPortable);
// cudaHostAlloc((void **)&delta2, n2 * 10 * sizeof(float),cudaHostAllocPortable);
// cudaHostAlloc((void **)&training_images, 60000 * 784 * sizeof(float),cudaHostAllocDefault);
// cudaHostAlloc((void **)&test_images, 10000 * 784 * sizeof(float),cudaHostAllocPortable);
    // initialize weights to small random values in [-0.5, 0.5] and deltas to 0
for (int i = 0; i < n2; i++) {
for (int j = 0; j < 784; j++) {
int sign = rand() % 2;
/*
// Row major ordering w1: (n2 x 784)
w1[i * 784 + j] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w1[i * 784 + j] = -1 * w1[i * 784 + j];
}
delta1[i * 784 + j] = 0.0;
*/
// Column major ordering w1: (784 x n2)
w1[j * n2 + i] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w1[j * n2 + i] = -1 * w1[j * n2 + i];
}
delta1[j * n2 + i] = 0.0;
}
}
for (int i = 0; i < 10; i++) {
for (int j = 0; j < n2; j++) {
int sign = rand() % 2;
/*
// Row major ordering w2: (10 x n2)
w2[i * n2 + j] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w2[i * n2 + j] = -1 * w2[i * n2 + j];
}
delta2[i * n2 + j] = 0.0;
*/
// Column major ordering w2: (n2 x 10)
w2[j * 10 + i] = (float)(rand() % 6) / 10.0;
if (sign == 1) {
w2[j * 10 + i] = -1 * w2[j * 10 + i];
}
delta2[j * 10 + i] = 0.0;
}
}
// convert training and test data to float
for (int i = 0; i < 60000; i++) {
for (int j = 0; j < 784; j++) {
//training_images[i * 784 + j] = (float)dataset.training_images[i][j];
training_images[i * 784 + j] = dataset.training_images[i][j] == 0 ? 0.0 : 1.0;
}
}
for (int i = 0; i < 10000; i++) {
for (int j = 0; j < 784; j++) {
//test_images[i * 784 + j] = (float)dataset.test_images[i][j];
test_images[i * 784 + j] = dataset.test_images[i][j] == 0 ? 0.0 : 1.0;
}
}
// Copy necessary host memory to GPU memory
cudaMemcpy(d_w1, w1, 784 * n2 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w2, w2, n2 * 10 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_delta1, delta1, 784 * n2 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_delta2, delta2, n2 * 10 * sizeof(float), cudaMemcpyHostToDevice);
// Set the block and grid dim
// For forward process: one thread block of size n2 for each image. Currently just using one thread block.
dim3 DimGridF(ceil(128*1.0/BLOCK_SIZE), 1, 1);
dim3 DimBlockF(BLOCK_SIZE, 1, 1);
dim3 DimGridB_1(1, 1, 1);
dim3 DimBlockB_1(n2, 1, 1);
/*
// Row major order weight matrices
dim3 DimGridB_2_w2(ceil(n2/10.0), 1, 1);
dim3 DimBlockB_2_w2(10, 10, 1);
dim3 DimGridB_2_w1(ceil(784*1.0/BLOCK_SIZE), ceil(n2*1.0/BLOCK_SIZE), 1);
dim3 DimBlockB_2_w1(BLOCK_SIZE, BLOCK_SIZE, 1);
*/
// Column major order weight matrices
dim3 DimGridB_2_w2(1, ceil(n2/10.0), 1);
dim3 DimBlockB_2_w2(10, 10, 1);
dim3 DimGridB_2_w1(ceil(n2*1.0/BLOCK_SIZE), ceil(784*1.0/BLOCK_SIZE), 1);
dim3 DimBlockB_2_w1(BLOCK_SIZE, BLOCK_SIZE, 1);
float ms;
cudaEvent_t start,stop;
// cudaStream_t stream1;
// cudaStream_t stream2;
// cudaStreamCreate(&stream1);
// cudaStreamCreate(&stream2);
// float * d_train;
// float * temp = d_layer1;
// cudaMalloc((void **)&d_train, 60000 * 784 * sizeof(float));
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// cudaMemcpy(d_train, training_images, 60000 * 784 * sizeof(float), cudaMemcpyHostToDevice);
// Begin training
for(int epochs = 0; epochs < 2; epochs++) {
for (int i = 0; i < dataset.training_images.size(); i++) {
label = static_cast<int>(dataset.training_labels[i]);
// Performing forward process
cudaMemcpy(d_layer1, &(training_images[i * 784]), 784 * sizeof(float), cudaMemcpyHostToDevice);
// d_layer1 = d_train + i * 784;
perceptron<<<DimGridF, DimBlockF>>>(d_layer1, d_layer2, d_layer3, d_w1, d_w2, n2);
cudaDeviceSynchronize();
/*
// Checking layer1, layer2, layer3 values
if (i == 0) {
cudaMemcpy(layer1, d_layer1, 784 * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(layer2, d_layer2, n2 * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(layer3, d_layer3, 10 * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "printing layer1: " << std::endl;
for (int j = 0; j < 784; j++) {
std::cout << layer1[j] << " ";
}
std::cout << std::endl;
std::cout << "printing layer2: " << std::endl;
for (int j = 0; j < n2; j++) {
std::cout << layer2[j] << " ";
}
std::cout << std::endl;
std::cout << "printing layer3: " << std::endl;
for (int j = 0; j < 10; j++) {
std::cout << layer3[j] << " ";
}
std::cout << std::endl;
}
*/
// Performing backpropagation (2 parts)
// part 1
back_propagation_1<<<DimGridB_1, DimBlockB_1>>>(d_layer3, d_layer2, d_theta3, d_theta2, d_w2, label, n2);
cudaDeviceSynchronize();
// part 2
back_propagation_2_w2<<<DimGridB_2_w2, DimBlockB_2_w2>>>(d_w2, d_delta2, d_theta3, d_layer2, learning_rate, momentum, n2);
back_propagation_2_w1<<<DimGridB_2_w1, DimBlockB_2_w1>>>(d_w1, d_delta1, d_theta2, d_layer1, learning_rate, momentum, n2);
cudaDeviceSynchronize();
/*
// Checking w1 and w2 values
if (i == 0) {
cudaMemcpy(w1, d_w1, 784 * n2 * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(w2, d_w2, n2 * 10 * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "printing w1: " << std::endl;
for (int j = 0; j < n2; j++) {
for (int k = 0; k < 784; k++) {
std::cout << w1[j * 784 + k] << " ";
}
std::cout << std::endl;
}
std::cout << "printing w2: " << std::endl;
for (int j = 0; j < 10; j++) {
for (int k = 0; k < n2; k++) {
std::cout << w2[j * n2 + k] << " ";
}
std::cout << std::endl;
}
}
*/
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
std::cout << "GPU execution time: " << ms << std::endl;
// d_layer1 =temp;
// Copy weights to host
/*
cudaMemcpy(w1, d_w1, 784 * n2 * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(w2, d_w2, n2 * 10 * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "printing w1: " << std::endl;
for (int i = 0; i < n2; i++) {
for (int j = 0; j < 784; j++) {
std::cout << w1[i * 784 + j] << " ";
}
std::cout << std::endl;
}
std::cout << "printing w2: " << std::endl;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < n2; j++) {
std::cout << w2[i * n2 + j] << " ";
}
std::cout << std::endl;
}
*/
// Testing
int prediction = 0;
int numCorrect = 0;
for (int i = 0; i < dataset.test_images.size(); i++) {
label = static_cast<int>(dataset.test_labels[i]);
cudaMemcpy(d_layer1, &(test_images[i * 784]), 784 * sizeof(float), cudaMemcpyHostToDevice);
perceptron<<<DimGridF, DimBlockF>>>(d_layer1, d_layer2, d_layer3, d_w1, d_w2, n2);
cudaDeviceSynchronize();
cudaMemcpy(layer3, d_layer3, 10 * sizeof(float), cudaMemcpyDeviceToHost);
for (int j = 0; j < 10; j++) {
if (layer3[j] > layer3[prediction]) {
prediction = j;
}
}
if (prediction == label) {
numCorrect++;
}
}
std::cout << "numCorrect: " << numCorrect << std::endl;
std::cout << "test size: " << dataset.test_images.size() << std::endl;
std::cout << "Accuracy: " << numCorrect / (1.0 * dataset.test_images.size()) << std::endl;
// free memory
free(layer1);
free(layer2);
free(layer3);
free(w1);
free(w2);
free(delta1);
free(delta2);
free(training_images);
free(test_images);
// cudaFreeHost(layer1);
// cudaFreeHost(layer2);
// cudaFreeHost(layer3);
// cudaFreeHost(w1);
// cudaFreeHost(w2);
// cudaFreeHost(delta1);
// cudaFreeHost(delta2);
// cudaFreeHost(training_images);
// cudaFreeHost(test_images);
cudaFree(d_layer1);
cudaFree(d_layer2);
cudaFree(d_layer3);
cudaFree(d_w1);
cudaFree(d_w2);
cudaFree(d_theta3);
cudaFree(d_theta2);
cudaFree(d_delta2);
cudaFree(d_delta1);
}
|
264e961622569e152ff8ced9371c30053632202c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<addTest.h>
#define DIM 1024
__global__ void reduceUnroll4(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 4 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 4;
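// 4x unrolling: each block first folds the next three block-sized chunks of the input
// into its own chunk (which is why callers launch this kernel with the grid divided by 4),
// then runs the usual tree reduction on that chunk.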
if (id + 3 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceUnroll8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
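// The last 64 partial sums are reduced warp-synchronously: the volatile pointer keeps the
// loads/stores of a single warp ordered, so no __syncthreads() is needed here (this relies
// on the implicit lockstep execution of a warp on pre-Volta hardware).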
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceCompleteUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
if (blockDim.x >= 1024 && tid < 512)
data[tid] += data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
data[tid] += data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
data[tid] += data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
data[tid] += data[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceCompleteUnroll(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x;
if (blockDim.x >= 1024 && tid < 512)
data[tid] += data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
data[tid] += data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
data[tid] += data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
data[tid] += data[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceCompleteShareMem(int *src, int *dst, int num) {
__shared__ int mem[DIM];
unsigned int tid = threadIdx.x;
if (tid >= num) return;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
mem[tid] = data[tid];
__syncthreads();
// this barrier guarantees that every thread in the block has reached this point, i.e. the copy into shared memory is complete
if (blockDim.x >= 1024 && tid < 512)
mem[tid] += mem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
mem[tid] += mem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
mem[tid] += mem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
mem[tid] += mem[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = mem;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = mem[0];
}
__global__ void reduceUnroll2(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 2 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 2;
if (id + blockDim.x < num) {
src[id] += src[id + blockDim.x];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceNeighboredLess(int* src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
int* data = src + blockIdx.x * blockDim.x;
if (id >= num) return;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int idx = 2 * tid * stride;
if (idx < blockDim.x) {
data[idx] += data[idx + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceInterieaved(int* src, int *dst, int num) {
// set threadId
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = blockDim.x / 2 ; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0) {
dst[blockIdx.x] = data[0];
}
}
__global__ void reduceNeighbored(int* src, int *dst, int num) {
// set threadId
unsigned int id_thread = threadIdx.x;
if (id_thread >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((id_thread % (2 * stride)) == 0) {
data[id_thread] += data[stride + id_thread];
}
__syncthreads();
}
if (id_thread == 0) {
dst[blockIdx.x] = data[0];
}
}
// CPU
int reduceNeighbored_cpu(int *data, int num) {
if (num == 1) return data[0];
int const stride = num / 2;
for (int i = 0; i < stride; i++) {
data[i] += data[i + stride];
}
if (num % 2 == 1) {
data[0] += data[num - 1];
}
return reduceNeighbored_cpu(data, stride);
}
int main(void) {
int dev = 0;
initDevice(dev);
int num = 1 << 20;
int* x_h = (int *)malloc(num * sizeof(int));
int* dst_cpu = (int *)malloc(num * sizeof(int));
int* dst_dev_cpu = (int *)malloc(num * sizeof(int));
for(int i = 0; i < num; i++) {
x_h[i] = i % 3;
}
int *x_d, *dst_d;
CHECK(hipMalloc((int**)&x_d, num * sizeof(int)));
CHECK(hipMalloc((int**)&dst_d, num * sizeof(int)));
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
int block = 1024;
int grid = (num + block -1) / block;
printf("grid : %d , block : %d\n", grid, block);
int sum_dev = 0;
//
// reduceNeighbored<<<grid, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// for (int i = 0; i < grid; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
// reduceNeighboredLess<<<grid, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error Less kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
// reduceInterieaved<<<grid, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
// reduceUnroll2<<<grid / 2, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 2; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
// reduceUnroll4<<<grid / 4, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 4; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error unroll4 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
// reduceUnroll8<<<grid / 8, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 8; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error unroll8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
// reduceUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 8; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
// reduceCompleteUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num);
// CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 8; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error Completewarps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceCompleteShareMem), dim3(grid/8), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid/8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reduceCompleteUnroll), dim3(grid), dim3(block), 0, 0, x_d, dst_d, num);
CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
hipFree(x_d);
hipFree(dst_d);
free(x_h);
free(dst_cpu);
free(dst_dev_cpu);
return 0;
}
|
264e961622569e152ff8ced9371c30053632202c.cu
|
#include<iostream>
#include<addTest.h>
#define DIM 1024
__global__ void reduceUnroll4(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 4 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 4;
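// 4x unrolling: each block first folds the next three block-sized chunks of the input
// into its own chunk (which is why callers launch this kernel with the grid divided by 4),
// then runs the usual tree reduction on that chunk.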
if (id + 3 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceUnroll8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
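// The last 64 partial sums are reduced warp-synchronously: the volatile pointer keeps the
// loads/stores of a single warp ordered, so no __syncthreads() is needed here (this relies
// on the implicit lockstep execution of a warp on pre-Volta hardware).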
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceCompleteUnrollWarps8(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
__syncthreads();
if (blockDim.x >= 1024 && tid < 512)
data[tid] += data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
data[tid] += data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
data[tid] += data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
data[tid] += data[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceCompleteUnroll(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x;
if (blockDim.x >= 1024 && tid < 512)
data[tid] += data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
data[tid] += data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
data[tid] += data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
data[tid] += data[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = data;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceCompleteShareMem(int *src, int *dst, int num) {
__shared__ int mem[DIM];
unsigned int tid = threadIdx.x;
if (tid >= num) return;
unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 8;
if (id + 7 * blockDim.x < num) {
src[id] += src[id + blockDim.x];
src[id] += src[id + blockDim.x * 2];
src[id] += src[id + blockDim.x * 3];
src[id] += src[id + blockDim.x * 4];
src[id] += src[id + blockDim.x * 5];
src[id] += src[id + blockDim.x * 6];
src[id] += src[id + blockDim.x * 7];
}
mem[tid] = data[tid];
__syncthreads();
// this barrier guarantees that every thread in the block has reached this point, i.e. the copy into shared memory is complete
if (blockDim.x >= 1024 && tid < 512)
mem[tid] += mem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
mem[tid] += mem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
mem[tid] += mem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
mem[tid] += mem[tid + 64];
__syncthreads();
if (tid < 32) {
volatile int *vmen = mem;
vmen[tid] += vmen[tid + 32];
vmen[tid] += vmen[tid + 16];
vmen[tid] += vmen[tid + 8];
vmen[tid] += vmen[tid + 4];
vmen[tid] += vmen[tid + 2];
vmen[tid] += vmen[tid + 1];
}
if (tid == 0)
dst[blockIdx.x] = mem[0];
}
__global__ void reduceUnroll2(int *src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x * 2 + threadIdx.x;
if (tid >= num) return;
int* data = src + blockIdx.x * blockDim.x * 2;
if (id + blockDim.x < num) {
src[id] += src[id + blockDim.x];
}
__syncthreads();
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceNeighboredLess(int* src, int *dst, int num) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
int* data = src + blockIdx.x * blockDim.x;
if (id >= num) return;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int idx = 2 * tid * stride;
if (idx < blockDim.x) {
data[idx] += data[idx + stride];
}
__syncthreads();
}
if (tid == 0)
dst[blockIdx.x] = data[0];
}
__global__ void reduceInterieaved(int* src, int *dst, int num) {
// set threadId
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = blockDim.x / 2 ; stride > 0; stride >>= 1) {
if (tid < stride) {
data[tid] += data[tid + stride];
}
__syncthreads();
}
if (tid == 0) {
dst[blockIdx.x] = data[0];
}
}
__global__ void reduceNeighbored(int* src, int *dst, int num) {
// set threadId
unsigned int id_thread = threadIdx.x;
if (id_thread >= num) return;
int* data = src + blockIdx.x * blockDim.x;
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((id_thread % (2 * stride)) == 0) {
data[id_thread] += data[stride + id_thread];
}
__syncthreads();
}
if (id_thread == 0) {
dst[blockIdx.x] = data[0];
}
}
// CPU
int reduceNeighbored_cpu(int *data, int num) {
if (num == 1) return data[0];
int const stride = num / 2;
for (int i = 0; i < stride; i++) {
data[i] += data[i + stride];
}
if (num % 2 == 1) {
data[0] += data[num - 1];
}
return reduceNeighbored_cpu(data, stride);
}
int main(void) {
int dev = 0;
initDevice(dev);
int num = 1 << 20;
int* x_h = (int *)malloc(num * sizeof(int));
int* dst_cpu = (int *)malloc(num * sizeof(int));
int* dst_dev_cpu = (int *)malloc(num * sizeof(int));
for(int i = 0; i < num; i++) {
x_h[i] = i % 3;
}
int *x_d, *dst_d;
CHECK(cudaMalloc((int**)&x_d, num * sizeof(int)));
CHECK(cudaMalloc((int**)&dst_d, num * sizeof(int)));
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
int block = 1024;
int grid = (num + block -1) / block;
printf("grid : %d , block : %d\n", grid, block);
int sum_dev = 0;
//
// reduceNeighbored<<<grid, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// for (int i = 0; i < grid; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
// reduceNeighboredLess<<<grid, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error Less kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
// reduceInterieaved<<<grid, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
// reduceUnroll2<<<grid / 2, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 2; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
// reduceUnroll4<<<grid / 4, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 4; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error unroll4 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
// reduceUnroll8<<<grid / 8, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 8; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error unroll8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
// reduceUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 8; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
//
// CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
// reduceCompleteUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num);
// CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
// sum_dev = 0;
// for (int i = 0; i < grid / 8; i++) {
// sum_dev += dst_dev_cpu[i];
// }
// reduceNeighbored_cpu(x_h, num);
// if (sum_dev != x_h[0])
// printf("Error Completewarps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceCompleteShareMem<<<grid/8, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid/8; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice));
reduceCompleteUnroll<<<grid, block>>>(x_d, dst_d, num);
CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost));
sum_dev = 0;
for (int i = 0; i < grid; i++) {
sum_dev += dst_dev_cpu[i];
}
reduceNeighbored_cpu(x_h, num);
if (sum_dev != x_h[0])
printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]);
cudaFree(x_d);
cudaFree(dst_d);
free(x_h);
free(dst_cpu);
free(dst_dev_cpu);
return 0;
}
|
7adfb7a3ac199c3cd499a31328b406bc10275adf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <hip/hip_runtime_api.h>
int width = 1000;
int height = 1000;
int numBins = 10;
hipArray *cuArray;
float* imageData;
int* dBins;
int* hBins;
// a reference to a 2D texture where each texture element contains a 1D float value
// hipReadModeElementType specifies that the returned data value should not be normalized
texture<float, 2, hipReadModeElementType> texture_float_2D;
// clamp
inline __device__ float clamp(float f, float a, float b)
{
return fmaxf(a, fminf(f, b));
}
// bins global memory vector to be filled with bin counts
// nbins size of bins vector
// minX the minimum x texture coordinate
// stepX step size in x in texture coordinates
// minY the minimum y texture coordinate
// stepY step size in y in texture coordinates
// minZ data value of the left edge of the left-most bin
// maxZ data value of the right edge of the right-most bin
extern "C" __global__ void calculateHistogram1( int *bins, int nbins,
float minX, float stepX,
float minY, float stepY,
float minZ, float maxZ )
{
// use block and thread ids to get texture coordinates for this thread
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// convert block/thread ids into texture coordinates
float x = minX + stepX * i;
float y = minY + stepY * j;
// don't over count if texture coordinates are out of bounds
if ( x < 1.0 && y < 1.0 )
{
// perform texture lookup
float result = tex2D(texture_float_2D, x, y);
// calculate bin index
float stepZ = ( maxZ - minZ ) / nbins;
float fbinIndex = floor( ( result - minZ ) / stepZ );
int binIndex = (int) clamp( fbinIndex, 0, nbins-1 );
// atomically add one to the bin corresponding to the data value
atomicAdd( bins+binIndex, 1 );
}
}
void initImageData( float* data )
{
int w,h;
float pi = atan(1) * 4;
for ( w = 0; w < width; w++ )
{
for ( h = 0; h < height; h++ )
{
float x = w / ( float ) width;
float y = h / ( float ) height;
float r = rand() / (float) RAND_MAX;
data[h+w*height] = ( y * y + sin( 2 * pi * x * x ) + r ) * 100;
}
}
}
void init(int argc, char **argv)
{
// size of texture data
unsigned int size = width * height * sizeof(float);
// allocate space for texture data and initialize with interesting function
imageData = (float*) malloc( size );
initImageData( imageData );
// set up CUDA texture description (32 bit float)
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
// create a CUDA array for accessing texture data
hipMallocArray(&cuArray,&channelDesc,width,height);
// copy image data from the host into the CUDA array
hipMemcpyToArray(cuArray, 0, 0, imageData, size, hipMemcpyHostToDevice);
// set texture access modes for the CUDA texture variable
// (clamp access for texture coordinates outside 0 to 1)
texture_float_2D.addressMode[0] = hipAddressModeClamp;
texture_float_2D.addressMode[1] = hipAddressModeClamp;
texture_float_2D.filterMode = hipFilterModeLinear;
texture_float_2D.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
hipBindTextureToArray(texture_float_2D, cuArray, channelDesc);
// Allocate space for histogram bin results
int sizeBins = sizeof( int ) * numBins;
hBins = (int*) malloc( sizeBins );
hipMalloc( &dBins, sizeBins );
}
void calculateHistogram(void)
{
int sizeBins = sizeof( int ) * numBins;
hipMemset( dBins, 0, sizeBins );
// calculate block and grid dimensions
dim3 dimBlock( 16, 16, 1);
int gridX = ceil( width / (float) dimBlock.x );
int gridY = ceil( height / (float) dimBlock.y );
dim3 dimGrid( gridX, gridY, 1);
// run the kernel over the whole texture
float stepX = 1.0 / width;
float stepY = 1.0 / height;
float minZ = -50.0;
float maxZ = 200.0;
hipLaunchKernelGGL(( calculateHistogram1), dim3(dimGrid), dim3(dimBlock), 0, 0, dBins, numBins, 0, stepX, 0, stepY, minZ, maxZ );
// copy results back to host
hipMemcpy( hBins, dBins, sizeBins, hipMemcpyDeviceToHost );
// print results
int sum = 0;
int i;
for ( i = 0 ; i < numBins ; i++ )
{
sum += hBins[i];
printf( "%d\n", hBins[i] );
}
printf( "sum %d\n", sum );
}
//Main program
int main(int argc, char **argv)
{
printf("CUDA Histogram Calculator\n");
init( argc, argv );
calculateHistogram( );
free( hBins );
free( imageData );
hipFree(dBins);
hipFreeArray(cuArray);
return 0;
}
|
7adfb7a3ac199c3cd499a31328b406bc10275adf.cu
|
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <cuda_runtime_api.h>
int width = 1000;
int height = 1000;
int numBins = 10;
cudaArray *cuArray;
float* imageData;
int* dBins;
int* hBins;
// a reference to a 2D texture where each texture element contains a 1D float value
// cudaReadModeElementType specifies that the returned data value should not be normalized
texture<float, 2, cudaReadModeElementType> texture_float_2D;
// clamp
inline __device__ float clamp(float f, float a, float b)
{
return fmaxf(a, fminf(f, b));
}
// bins global memory vector to be filled with bin counts
// nbins size of bins vector
// minX the minimum x texture coordinate
// stepX step size in x in texture coordinates
// minY the minimum y texture coordinate
// stepY step size in y in texture coordinates
// minZ data value of the left edge of the left-most bin
// maxZ data value of the right edge of the right-most bin
extern "C" __global__ void calculateHistogram1( int *bins, int nbins,
float minX, float stepX,
float minY, float stepY,
float minZ, float maxZ )
{
// use block and thread ids to get texture coordinates for this thread
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// convert block/thread ids into texture coordinates
float x = minX + stepX * i;
float y = minY + stepY * j;
// don't over count if texture coordinates are out of bounds
if ( x < 1.0 && y < 1.0 )
{
// perform texture lookup
float result = tex2D(texture_float_2D, x, y);
// calculate bin index
float stepZ = ( maxZ - minZ ) / nbins;
float fbinIndex = floor( ( result - minZ ) / stepZ );
int binIndex = (int) clamp( fbinIndex, 0, nbins-1 );
// atomically add one to the bin corresponding to the data value
atomicAdd( bins+binIndex, 1 );
}
}
void initImageData( float* data )
{
int w,h;
float pi = atan(1) * 4;
for ( w = 0; w < width; w++ )
{
for ( h = 0; h < height; h++ )
{
float x = w / ( float ) width;
float y = h / ( float ) height;
float r = rand() / (float) RAND_MAX;
data[h+w*height] = ( y * y + sin( 2 * pi * x * x ) + r ) * 100;
}
}
}
void init(int argc, char **argv)
{
// size of texture data
unsigned int size = width * height * sizeof(float);
// allocate space for texture data and initialize with interesting function
imageData = (float*) malloc( size );
initImageData( imageData );
// set up CUDA texture description (32 bit float)
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
// create a CUDA array for accessing texture data
cudaMallocArray(&cuArray,&channelDesc,width,height);
// copy image data from the host into the CUDA array
cudaMemcpyToArray(cuArray, 0, 0, imageData, size, cudaMemcpyHostToDevice);
// set texture access modes for the CUDA texture variable
// (clamp access for texture coordinates outside 0 to 1)
texture_float_2D.addressMode[0] = cudaAddressModeClamp;
texture_float_2D.addressMode[1] = cudaAddressModeClamp;
texture_float_2D.filterMode = cudaFilterModeLinear;
texture_float_2D.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
cudaBindTextureToArray(texture_float_2D, cuArray, channelDesc);
// Allocate space for histogram bin results
int sizeBins = sizeof( int ) * numBins;
hBins = (int*) malloc( sizeBins );
cudaMalloc( &dBins, sizeBins );
}
void calculateHistogram(void)
{
int sizeBins = sizeof( int ) * numBins;
cudaMemset( dBins, 0, sizeBins );
// calculate block and grid dimensions
dim3 dimBlock( 16, 16, 1);
int gridX = ceil( width / (float) dimBlock.x );
int gridY = ceil( height / (float) dimBlock.y );
dim3 dimGrid( gridX, gridY, 1);
// run the kernel over the whole texture
float stepX = 1.0 / width;
float stepY = 1.0 / height;
float minZ = -50.0;
float maxZ = 200.0;
calculateHistogram1<<<dimGrid, dimBlock, 0>>>( dBins, numBins, 0, stepX, 0, stepY, minZ, maxZ );
// copy results back to host
cudaMemcpy( hBins, dBins, sizeBins, cudaMemcpyDeviceToHost );
// print results
int sum = 0;
int i;
for ( i = 0 ; i < numBins ; i++ )
{
sum += hBins[i];
printf( "%d\n", hBins[i] );
}
printf( "sum %d\n", sum );
}
//Main program
int main(int argc, char **argv)
{
printf("CUDA Histogram Calculator\n");
init( argc, argv );
calculateHistogram( );
free( hBins );
free( imageData );
cudaFree(dBins);
cudaFreeArray(cuArray);
return 0;
}
|
18824a2d66fdf8f2b81715d2e72f255255c43e06.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "toinvgrayscale.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
__global__ void toinvgrayscaleKernel(unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int frame_position_target) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < width * height) {
unsigned char* frame = (unsigned char*)&device_data[frame_position];
unsigned char* target_frame = (unsigned char*)&device_data[frame_position_target];
int current_x = (i % width);
int current_y = (i / width);
float value = 0.0f;
for (int c = 0; c < channels; c++) {
value += frame[current_y * (width * channels) + current_x * channels + c];
}
target_frame[current_y * width + current_x] = 255 - (unsigned char)roundf(value / (float)channels);
}
}
void launch_toinvgrayscale(unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int frame_position_target) {
hipError_t err = hipSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (width * height + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( toinvgrayscaleKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, device_data, frame_position, width, height, channels, frame_position_target);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed in toinvgrayscaleKernel (error code %s)\n", hipGetErrorString(err));
}
}
|
18824a2d66fdf8f2b81715d2e72f255255c43e06.cu
|
#include "toinvgrayscale.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
__global__ void toinvgrayscaleKernel(unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int frame_position_target) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < width * height) {
unsigned char* frame = (unsigned char*)&device_data[frame_position];
unsigned char* target_frame = (unsigned char*)&device_data[frame_position_target];
int current_x = (i % width);
int current_y = (i / width);
float value = 0.0f;
for (int c = 0; c < channels; c++) {
value += frame[current_y * (width * channels) + current_x * channels + c];
}
target_frame[current_y * width + current_x] = 255 - (unsigned char)roundf(value / (float)channels);
}
}
void launch_toinvgrayscale(unsigned int* device_data,
const unsigned int frame_position,
const unsigned int width, const unsigned int height, const unsigned int channels,
const unsigned int frame_position_target) {
cudaError_t err = cudaSuccess;
int threadsPerBlock = 256;
int blocksPerGrid = (width * height + threadsPerBlock - 1) / threadsPerBlock;
toinvgrayscaleKernel<<<blocksPerGrid, threadsPerBlock>>> (device_data, frame_position, width, height, channels, frame_position_target);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed in toinvgrayscaleKernel (error code %s)\n", cudaGetErrorString(err));
}
}
|
cb8c9170ae27bc8adc57789513f75390ced6525a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dv_in(idata, idata + n);
thrust::device_vector<int> dv_out(odata, odata + n);
//timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
//timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
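// Hypothetical host-side usage sketch (not part of the original file):
//   int in[4] = {1, 2, 3, 4}, out[4];
//   StreamCompaction::Thrust::scan(4, out, in);   // exclusive scan -> out = {0, 1, 3, 6}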
}
}
|
cb8c9170ae27bc8adc57789513f75390ced6525a.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dv_in(idata, idata + n);
thrust::device_vector<int> dv_out(odata, odata + n);
//timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
//timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
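// Hypothetical host-side usage sketch (not part of the original file):
//   int in[4] = {1, 2, 3, 4}, out[4];
//   StreamCompaction::Thrust::scan(4, out, in);   // exclusive scan -> out = {0, 1, 3, 6}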
}
}
|
256f4513598a1b7a4f3c64bbd436aa6f38343e02.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal z -> c d s
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_zlobpcg_maxpy_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex *X,
magmaDoubleComplex *Y){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ ){
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes an axpy for an mxn matrix:
Y = X + Y
It replaces:
magma_zaxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
X magmaDoubleComplex*
input vector X
@param
Y magmaDoubleComplex*
input/output vector Y
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_maxpy( magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex *X,
magmaDoubleComplex *Y){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
hipLaunchKernelGGL(( magma_zlobpcg_maxpy_kernel), dim3(grid), dim3(block), 0, magma_stream ,
num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
|
256f4513598a1b7a4f3c64bbd436aa6f38343e02.cu
|
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal z -> c d s
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_zlobpcg_maxpy_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex *X,
magmaDoubleComplex *Y){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ ){
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes an axpy for an mxn matrix:
Y = X + Y
It replaces:
magma_zaxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
X magmaDoubleComplex*
input vector X
@param
Y magmaDoubleComplex*
input/output vector Y
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_maxpy( magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex *X,
magmaDoubleComplex *Y){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
magma_zlobpcg_maxpy_kernel<<< grid, block, 0, magma_stream >>>
( num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
|
9cf8c44871fb628ed650c003f48659f293eab31e.hip
|
// !!! This is a file automatically generated by hipify!!!
//nvcc colliprev.cu -o test -lstdc++ -lpthread -lcufft -lpcap -std=c++11
#include <pcap.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/ip_icmp.h>
#include <net/ethernet.h>
#include <netinet/if_ether.h>
#include <netinet/ether.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <memory.h>
#include <malloc.h>
#include <iostream>
//--------------CUDA----------------
#include <hip/hip_runtime.h>
#include <hipfft.h>
//-------------------------------------
// ----------------------------------------
#define PI 3.1415926f
#define UWC 1500.0f // speed of sound in water (m/s)
#define FS 250000 // sampling rate (Hz)
#define threadsPerBlock 512
#define d 0.07f
#define FL 90000.0f
#define FH 100000.0f
#define TL 17
#define CHANNUM 16
#define FRAMELEN 6800
#define DOWNSAMPLE 1
#define FIRORDER 256
#define FILTER_FRAME FRAMELEN
#define NFFT FRAMELEN //
#define BEAMNUM 91
#define THREADNUMPERBLK 200
#define ARRAYNUM 15
#define STARTBEAM 15
#define ENDBEAM 75
#define MAXTRACETARNUM 3
#define M 3
#define ONLINEMODE 0
#define FILEMODE 1
#define DEST_PORT 0
#define PSD_LEN 20
#define THETANUM 3 //
#define PHINUM 9 //
#define NREF 250 //
#define FRAMELEN 6800
#define NHILBT FRAMELEN //
#define ELENUM 12 //
#define NNZERO 200 //
#define NFIR (FILTER_FRAME+NNZERO) //
#define NMAT FRAMELEN //
#define DIRECTARRIVENUM 30
// -----------------------------------------------------
void *ReadBoard0Data(void *lParam);
void *ActiveReceiveNetwork(void *lParam);
void *ActiveDataFormatting(void *lParam);
void *ActiveSignalProcessing(void *lParam);
//------------------------------------------------------
pthread_mutex_t count_lock_Board0DataReady;
pthread_mutex_t count_lock_ActiveFrameDataReady;
pthread_cond_t cond_Board0DataReady;
pthread_cond_t cond_ActiveFrameDataReady;
unsigned int count_Board0DataReady;
unsigned int count_ActiveFrameDataReady;
//-----------------------------------------------------
int *DataBufA_B1 = NULL;//16Channel
float *ChannDataBufA=NULL;//16Channel
float *ChannDataBuf=NULL;//12Channel
//---------------------------------------------------
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h);
float window(int type,int n,int i,float beta);
float kaiser(int i,int n,float beta);
float bessel0(float x);
void findpeak(float *data, int *p,int dn);
void findvalley(float *data, int *p,int dn);
bool peakdetection(int beamidx,float *be,int *valley,float threshold);
void rbub(float *p,int *idx,int n);
// -----------------------------------------------------------
float rsRef[NREF]={0.0};// reference signal
float theta[3]={1.3963, 1.5708, 1.7453};// pitch search angles: 80-100 degrees (radians)
float phi[9]={0.8727, 1.0472, 1.2217, 1.3963 , 1.5708, 1.7453 , 1.9199 , 2.0944 , 2.2689};// azimuth search angles: 50-130 degrees (radians)
float xEle[12]={0.0};// x coordinates of the array elements
float zEle[12]={0.0};// z coordinates of the array elements
float dTime[THETANUM*PHINUM*ELENUM]={0.0};// propagation delays for each (theta, phi, element) combination
// -----------------------------------------------------------
int main(){
pthread_t t_ActiveReceiveNetworkData;
pthread_t t_ActiveDataFormatting;
pthread_t t_ActiveSignalProcessing;
pthread_t t_ReadBoard0Data;
cond_Board0DataReady = PTHREAD_COND_INITIALIZER;
cond_ActiveFrameDataReady = PTHREAD_COND_INITIALIZER;
count_lock_Board0DataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_ActiveFrameDataReady = PTHREAD_MUTEX_INITIALIZER;
pthread_create(&t_ActiveSignalProcessing,NULL,ActiveSignalProcessing,(void *)NULL);
pthread_create(&t_ActiveDataFormatting,NULL,ActiveDataFormatting,(void *)NULL);
#if ONLINEMODE
pthread_create(&t_ActiveReceiveNetworkData,NULL,ActiveReceiveNetwork,(void *)NULL);
#endif
#if FILEMODE
pthread_create(&t_ReadBoard0Data,NULL,ReadBoard0Data,(void *)NULL);
#endif
pthread_join(t_ActiveSignalProcessing, NULL);
return 0;
}
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h)
{
int i,n2,mid;
float sum = 0;
float s,wc1,wc2,beta = 0,delay;
float fln = fl / fs;
float fhn = fh / fs;
beta = 6;
if((n%2)==0)
{
n2=n/2-1;
mid=1;
}
else
{
n2=n/2;
mid=0;
}
delay=n/2.0;
wc1=2.0*PI*fln;
if(band>=3) wc2=2.0*PI*fhn;
switch(band)
{
case 1:// low-pass
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)/(PI*s))*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=wc1/PI;
for(i=0;i<=n;i++)
{
sum=sum+*(h+i);
}
for(i=0;i<=n;i++)
{
*(h+i)=*(h+i)/fabs(sum);
}
break;
}
case 2: // high-pass
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(PI*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=1.0-wc1/PI;
break;
}
case 3: // band-pass
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc2*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc2-wc1)/PI;
break;
}
case 4: // band-stop
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)+sin(PI*s)-sin(wc2*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc1+PI-wc2)/PI;
break;
}
}
return 0;
}
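// Hypothetical call matching the defines above (band 3 = band-pass, window 7 = Kaiser);
// fir1 fills h[0..n], so the buffer needs FIRORDER + 1 taps:
//   float h[FIRORDER + 1];
//   fir1(FIRORDER, 3, FL, FH, FS, 7, h);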
float window(int type,int n,int i,float beta)
{
int k;
float w=1.0;
switch(type)
{
case 1: // rectangular window
{
w=1.0;
break;
}
case 2: // tapered-cosine (Tukey-like) window
{
k=(n-2)/10;
if(i<=k) w=0.5*(1.0-cos(i*PI/(k+1)));
if(i>n-k-2) w=0.5*(1.0-cos((n-i-1)*PI/(k+1)));
break;
}
case 3: // triangular (Bartlett) window
{
w=1.0-fabs(1.0-2*i/(n-1.0));
break;
}
case 4: // Hanning window
{
w=0.5*(1.0-cos(2*i*PI/(n-1.0)));
break;
}
case 5: // Hamming window
{
w=0.54-0.46*cos(2*i*PI/(n-1.0));
break;
}
case 6: // Blackman window
{
w=0.42-0.5*cos(2*i*PI/(n-1.0))+0.08*cos(4*i*PI/(n-1.0));
break;
}
case 7: // Kaiser window
{
w=kaiser(i,n,beta);
break;
}
}
return(w);
}
float kaiser(int i,int n,float beta) // Kaiser window coefficient at index i
{
float a,w,a2,b1,b2,beta1;
b1=bessel0(beta);
a=2.0*i/(float)(n-1)-1.0;
a2=a*a;
beta1=beta*sqrt(1.0-a2);
b2=bessel0(beta1);
w=b2/b1;
return(w);
}
float bessel0(float x) // zeroth-order modified Bessel function of the first kind (series expansion)
{
int i;
float dd,y,d2,sum = 0;
y=x/2.0;
dd=1.0;
for(i=1;i<=25;i++)
{
dd=dd*y/i;
d2=dd*dd;
sum=sum+d2;
if(d2<sum*(1.0e-8)) break;
}
return(sum);
}
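// Generates frequency-domain steering (phase-shift) factors for conventional beamforming:
// thread tid handles one of the BEAMNUM beam angles, block bid one frequency bin, and each
// stored factor exp(j*2*pi*bid*delay_samples/NFFT) compensates the inter-element time delay
// i*d*cos(theta)/UWC of array element i.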
__global__ void PhiShiftFactorGen(hipfftComplex *XNSS)
{
int bid = 0,tid = 0;
float tt = 0.0f;
float angle=0.0f;
float det[ARRAYNUM];
float MovePoints[ARRAYNUM];
bid = blockIdx.x;
tid = threadIdx.x;
angle=float(tid*PI/(BEAMNUM-1));
for(int i=0;i<ARRAYNUM;i++)
{
det[i]=i*d*cos(angle)/UWC;
MovePoints[i]=det[i]*FS/DOWNSAMPLE;
tt=MovePoints[i]*2*PI*bid/NFFT;
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].x = cos(tt);
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].y = sin(tt);
}
}
void findpeak(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
// compare with the following 10 samples (wrapping around the end)
for(j=1;j<11;j++)
{
if ((i+j)>=dn)
{
a1=*(data+i+j-dn);
}
else
{
a1=*(data+i+j);
}
if (a0>a1)
{
acc=acc+1;
}
}
a0=*(data+i);
// compare with the preceding 10 samples (wrapping around the start)
for(j=1;j<11;j++)
{
if ((i-j)<0)
{
a1=*(data+i-j+dn);
}
else
{
a1=*(data+i-j);
}
if (a0>a1)
{
acc1=acc1+1;
}
}
if ((acc==10) && (acc1==10))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
void findvalley(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
// compare with the following 5 samples
for(j=1;j<6;j++)
{
if ((i+j)>=dn)
{
break;
}
else
{
a1=*(data+i+j);
}
if (a0<a1)
{
acc=acc+1;
}
}
if(j<5) //
{
acc = 5;
}
a0=*(data+i);
// compare with the preceding 5 samples
for(j=1;j<6;j++)
{
if ((i-j)<0)
{
break;
}
else
{
a1=*(data+i-j);
}
if (a0<a1)
{
acc1=acc1+1;
}
}
if(j<5) //
{
acc1 = 5;
}
if ((acc==5) && (acc1==5))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
bool peakdetection(int beamidx,float *be,int *valley,float threshold)
{
int index = 0,ll=0;
float pvr1 = 1.0,pvr2 = 1.0;
if(beamidx >= STARTBEAM && beamidx <= ENDBEAM)
{
for(ll=beamidx+1;ll<BEAMNUM;ll++)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll<=BEAMNUM-1)
{
pvr1 = be[beamidx] / be[index];
}
for(ll=beamidx-1;ll>=0;ll--)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll>=0)
{
pvr2 = be[beamidx] / be[index];
}
if(pvr1 >= threshold && pvr2 >= threshold)
{
return true;
}
else
{
return false;
}
}
else
{
return false;
}
}
void rbub(float *p,int *idx,int n)
{
int m,k,j,i,xx;
float dd;
k=0;
m=n-1;
while (k<m)
{
j=m-1; m=0;
for(i=k; i<=j; i++)
{
if(p[i]<p[i+1])
{
dd=p[i];
p[i]=p[i+1];
p[i+1]=dd;
xx = idx[i];
idx[i] = idx[i+1];
idx[i+1] = xx;
m=i;
}
}
j=k+1;
k=0;
for (i=m; i>=j; i--)
{
if(p[i-1]<p[i])
{
dd=p[i];
p[i]=p[i-1];
p[i-1]=dd;
xx = idx[i];
idx[i] = idx[i-1];
idx[i-1] = xx;
k=i;
}
}
}
return;
}
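// Frequency-domain beamforming: each block handles one (beam, frequency-chunk) pair; the steering
// factors in PhiArray are applied to every array channel's spectrum between bins nfl and nfh and
// the squared magnitudes are accumulated into dev_energy.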
__global__ void FD_Beamform(hipfftComplex *dev_fft,hipfftReal *dev_energy,hipfftComplex *PhiArray,int nfl,int nfh)
{
__shared__ float Mabs[THREADNUMPERBLK];
float tempX=0.0f;
float tempY=0.0f;
hipComplex XNSS;
hipComplex XFFTafterPinYi;
float ax = 0.0f,ay=0.0f,bx=0.0f,by=0.0f;
float energyEachBoShu = 0.0f;
int bid = 0,tid = 0;
int beamidx = 0, freqidx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
beamidx = bid % BEAMNUM;
freqidx = bid / BEAMNUM*THREADNUMPERBLK+tid;
if(tid==0)
{
memset(Mabs,0,sizeof(float)*THREADNUMPERBLK);
}
__syncthreads();
//
tempX=0.0;
tempY=0.0;
for(int i=0;i<ARRAYNUM;i++)
{
XNSS.x=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].x;
XNSS.y=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].y;
ax=dev_fft[i*(NFFT/2+1)+freqidx].x;
ay=dev_fft[i*(NFFT/2+1)+freqidx].y;
bx=XNSS.x;
by=XNSS.y;
if (freqidx>= nfl && freqidx<=nfh)
{
XFFTafterPinYi.x=ax*bx-ay*by;
XFFTafterPinYi.y=ax*by+bx*ay;
}
else
{
XFFTafterPinYi.x=0;
XFFTafterPinYi.y=0;
}
tempX=tempX+ XFFTafterPinYi.x;
tempY=tempY+ XFFTafterPinYi.y;
}
Mabs[tid]=tempX*tempX+tempY*tempY;
//
__syncthreads();
//
if(tid==0)
{
energyEachBoShu=0.0f;
for(int k=0;k<THREADNUMPERBLK;k++)
{
energyEachBoShu=energyEachBoShu+Mabs[k];
}
dev_energy[bid]= energyEachBoShu;
}
}
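// Sums an nrow x ncol matrix along its rows; one block per column.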
__global__ void MatrixSumRow(hipfftReal *dev_energy,hipfftReal *sum_energy,int nrow,int ncol)
{
int bid = 0;
int row = 0,col = 0;
float sum = 0.0;
bid = blockIdx.x;
row = nrow;
col = ncol;
for(int ii = 0;ii<row;ii++)
{
sum = sum+dev_energy[ii*col+bid];
}
sum_energy[bid] = sum;
}
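// Frequency-domain FIR filtering for all channels: multiplies each channel's spectrum by the
// filter spectrum (one block per channel/frequency-chunk pair).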
__global__ void DownSamplingFilter(hipfftComplex *dev_fft_sig,hipfftComplex *dev_fft_filter,hipfftComplex *dev_fft_yk,int FFTN)//needchange
{
int bid = 0,tid = 0;
hipComplex Sigk;
hipComplex Hk;
int chanIdx = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (CHANNUM*2);
freqIdx = bid / (CHANNUM*2)*THREADNUMPERBLK+tid;
Sigk.x = dev_fft_sig[chanIdx*FFTN+freqIdx].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+freqIdx].y;
Hk.x = dev_fft_filter[freqIdx].x;
Hk.y = dev_fft_filter[freqIdx].y;
dev_fft_yk[chanIdx*FFTN+freqIdx].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+freqIdx].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
}
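// Builds the delay filters for steering angle theta: thread 0 of each block computes the integer
// (dI) and fractional (tau) delay of its array element, then all threads fill and normalize a
// (2m+1)-tap sinc interpolation filter.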
__global__ void DelayFilterGen(float *h,int m,float theta,float *tau,int *dI)
{
int bid = 0,tid = 0;
int k=0;
float dfs = 0.0;
int DI = 0;
__shared__ float sum;
bid = blockIdx.x;
tid = threadIdx.x;
if(tid == 0)
{
sum = 0.0;
dfs = bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE);
DI = int(bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE)+0.5);
tau[bid] =dfs-DI;
dI[bid] = DI;
//printf("bid=%d,m=%d,theta = %.3f,dfs = %.3f,DI = %d\n",bid,m,theta,dfs,DI);
}
//
__syncthreads();
k = tid-m;
h[bid*(2*m+1)+tid] = sin(k*1.0*PI-tau[bid]*PI+0.000001)/(k*1.0*PI-tau[bid]*PI+0.000001);
//
__syncthreads();
if(tid == 0)
{
for(int k=0;k<2*m+1;k++)
{
sum = sum + h[bid*(2*m+1)+k];
}
}
__syncthreads();
h[bid*(2*m+1)+tid] = h[bid*(2*m+1)+tid]/sum;
}
__global__ void FineDelayFilter(hipfftReal *dev_xin,hipfftReal *dev_yout,hipfftReal *delayfilter,int m)
{
int bid,tid;
float x=0.0,h=0.0;
float sum = 0.0;
bid = blockIdx.x;
tid = threadIdx.x;
__shared__ float y[2*M+1];
if(tid == 0)
{
for(int ii=0;ii<2*m+1;ii++)
{
y[ii] = 0.0;
}
}
if(bid-2*m+tid >= 0 && bid-2*m+tid < (FILTER_FRAME/DOWNSAMPLE))
{
x = dev_xin[bid-2*m+tid];
}
if(2*m-tid >=0)
{
h = delayfilter[2*m-tid];
}
y[tid] = x*h;
//if(bid == 24855)
//{
// printf("bid = %d,x=%.8f,h=%.8f,y=%.8f\n",bid,x,h,y);
//}
//
__syncthreads();
if(tid == 0)
{
sum = 0.0;
for(int jj=0;jj<2*m+1;jj++)
{
sum = sum + y[jj];
}
dev_yout[bid] = sum;
//if(bid == 24855)
//{
// printf("bid = %d,dev_yout=%.8f\n",bid,dev_yout[bid]);
//}
}
}
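// Per-bin power spectral density of an FFT frame, normalized by N.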
__global__ void Psd(hipfftComplex *Xk,hipfftReal *Xabs, int N)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
Xabs[freqIdx] = (Xk[freqIdx].x*Xk[freqIdx].x+Xk[freqIdx].y*Xk[freqIdx].y) / N;
}
__global__ void ActiveFilter(hipfftComplex *dev_fft_sig,hipfftComplex *dev_fft_filter,hipfftComplex *dev_fft_yk,int FFTN)
{
int bid = 0,tid = 0;
hipComplex Sigk;
hipComplex Hk;
int chanIdx = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (ELENUM);
freqIdx = bid / ELENUM*THREADNUMPERBLK+tid;
Sigk.x = dev_fft_sig[chanIdx*FFTN+freqIdx].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+freqIdx].y;
Hk.x = dev_fft_filter[freqIdx].x;
Hk.y = dev_fft_filter[freqIdx].y;
dev_fft_yk[chanIdx*FFTN+freqIdx].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+freqIdx].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
}
__global__ void ActiveDelayFilterGen(hipfftReal *h, int *dI, hipfftReal *dF, float *delaytime,int index)//fractional-delay filter parameters for all channels
{
// h - filter coefficients
// dI - integer delays
// dF - fractional delays
// index - beam index
int bid = 0,tid = 0;
int k=0;
float dfs = 0.0;
int DI = 0;
__shared__ float sum;
//__shared__ float dfs;
bid = blockIdx.x;//
tid = threadIdx.x;//
if(tid == 0)
{
sum = 0.0;
dfs = delaytime[index+bid];//
DI = int(dfs);//
dF[bid] =dfs-DI;//
dI[bid] = DI;
//printf("bid=%d,m=%d,theta = %.3f,dfs = %.3f,DI = %d\n",bid,m,theta,dfs,DI);
}
//
__syncthreads();
k = tid-M;
h[bid*(2*M+1)+tid] = sin(k*1.0*PI-dF[bid]*PI+0.000001)/(k*1.0*PI-dF[bid]*PI+0.000001);
//
__syncthreads();
if(tid == 0)
{
for(int k=0;k<2*M+1;k++)
{
sum = sum + h[bid*(2*M+1)+k];
}
}
__syncthreads();
h[bid*(2*M+1)+tid] = h[bid*(2*M+1)+tid]/sum;
}
__global__ void ActiveFineDelayFilter(hipfftReal *dev_xin,hipfftReal *dev_yout,hipfftReal *delayfilter)//
{
int bid,tid;
float x=0.0,h=0.0;
float sum = 0.0;
bid = blockIdx.x;//
tid = threadIdx.x;//
__shared__ float y[2*M+1];
if(tid == 0)
{
for(int ii=0;ii<2*M+1;ii++)
{
y[ii] = 0.0;
}
}
if(bid+tid >= M && bid+tid <= FRAMELEN+M)
{
x = dev_xin[bid-M+tid];
}
if(2*M-tid >=0)
{
h = delayfilter[2*M-tid];
}
y[tid] = x*h;
//
__syncthreads();
if(tid == 0)
{
sum = 0.0;
for(int jj=0;jj<2*M+1;jj++)
{
sum = sum + y[jj];
}
dev_yout[bid] = sum;
}
}
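// Element-wise complex multiplication of two spectra (used here for frequency-domain matched filtering).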
__global__ void VectorMultiplier(hipfftComplex *dev_in,hipfftComplex *dev_h,hipfftComplex *dev_out)
{
int bid=blockIdx.x;
dev_out[bid].x=dev_in[bid].x*dev_h[bid].x-dev_in[bid].y*dev_h[bid].y;
dev_out[bid].y=dev_in[bid].x*dev_h[bid].y+dev_in[bid].y*dev_h[bid].x;
}
__global__ void HilbFilt(hipfftComplex *dev_hilboutfreq, hipfftComplex *dev_matchdatafreq, int mid) //Hilbert
{
int bid=blockIdx.x;
float xx=dev_matchdatafreq[bid].x;
float yy=dev_matchdatafreq[bid].y;
if(bid<=mid)
{
dev_hilboutfreq[bid].x=yy;
dev_hilboutfreq[bid].y=-xx;
}
else{
dev_hilboutfreq[bid].x=-yy;
dev_hilboutfreq[bid].y=xx;
}
}
__global__ void DevFindPeak(hipfftReal *dev_beamdata,int *dev_peak,int datalen)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
int bid=blockIdx.x;
// int tid=threadIdx.x;
for(i=0;i<datalen;i++)
{
a0=*(dev_beamdata+bid*datalen+i);
//
for(j=1;j<11;j++)
{
if ((i+j)>=datalen)
{
a1=*(dev_beamdata+bid*datalen+i+j-datalen);
}
else
{
a1=*(dev_beamdata+bid*datalen+i+j);
}
if (a0>a1)
{
acc=acc+1;
}
}
a0=*(dev_beamdata+bid*datalen+i);
////
for(j=1;j<11;j++)
{
if ((i-j)<0)
{
a1=*(dev_beamdata+bid*datalen+i-j+datalen);
}
else
{
a1=*(dev_beamdata+bid*datalen+i-j);
}
if (a0>a1)
{
acc1=acc1+1;
}
}
if ((acc==10) && (acc1==10))
{
//if(bid == 0)
//{
// printf("%d:%.1f\n",i,*(dev_beamdata+bid*datalen+i));
//}
*(dev_peak+bid*datalen+i)=1;
}
acc=0;
acc1=0;
}
}
__global__ void DevFindValley(hipfftReal *dev_beamdata,int *dev_valley,int datalen)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
int bid=blockIdx.x;
for(i=0;i<datalen;i++)
{
a0=*(dev_beamdata+bid*datalen+i);
//
for(j=1;j<6;j++)
{
if ((i+j)>=datalen)
{
//a1=*(data+i+j-dn);
break;
}
else
{
a1=*(dev_beamdata+bid*datalen+i+j);
}
if (a0<a1)
{
acc=acc+1;
}
}
if(j<5) //break
{
acc = 5;
}
a0=*(dev_beamdata+bid*datalen+i);
////
for(j=1;j<6;j++)
{
if ((i-j)<0)
{
//a1=*(data+i-j+dn);
break;
}
else
{
a1=*(dev_beamdata+bid*datalen+i-j);
}
if (a0<a1)
{
acc1=acc1+1;
}
}
if(j<5) //break
{
acc1 = 5;
}
if ((acc==5) && (acc1==5))
{
*(dev_valley+bid*datalen+i)=1;
}
acc=0;
acc1=0;
}
}
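// Screens peaks per beam: a peak is kept when its peak-to-valley ratio on both sides exceeds
// threshold and its height exceeds thresholdabs; the first kept peak and the overall maximum are
// then converted to range (time-of-flight * c/2) and stored in dev_selected.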
__global__ void DevPeakDetection(int *dev_peak,int *dev_valley,hipfftReal *dev_beamdata,hipfftReal *dev_preselected,hipfftReal *dev_selected,int datalen,float threshold,float thresholdabs)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int index = 0,ll=0;
float pvr1 = 1.0,pvr2 = 1.0;
bool foundfirst = false;
float maxval = 0.0,c=1500.0;
for(int ii=1;ii<datalen-1;ii++)
{
if(dev_peak[bid*datalen+ii] ==1)
{
for(ll=ii+1;ll<datalen;ll++)
{
if(dev_valley[bid*datalen+ll] == 1)
{
index = ll;
break;
}
}
if(ll<=datalen-1)
{
pvr1 = dev_beamdata[bid*datalen+ii] / dev_beamdata[bid*datalen+index];
}
for(ll=ii-1;ll>=0;ll--)
{
if(dev_valley[bid*datalen+ll] == 1)
{
index = ll;
break;
}
}
if(ll>=0)
{
pvr2 = dev_beamdata[bid*datalen+ii] / dev_beamdata[bid*datalen+index];
}
if(pvr1 >= threshold && pvr2 >= threshold && dev_beamdata[bid*datalen+ii] > thresholdabs)
{
dev_preselected[bid*datalen+ii]=1;
}
else
{
dev_preselected[bid*datalen+ii]=0;
}
}
else
{
dev_preselected[bid*datalen+ii]=0;
}
}
//
for(int ii=0;ii<datalen-1;ii++)
{
if(dev_preselected[bid*datalen+ii] == 1 && foundfirst == false)
{
foundfirst = true;
dev_selected[bid*4+0] = (DIRECTARRIVENUM*TL+ii) *1.0 / FS * c / 2;
//dev_selected[bid*3+0] = ii;
dev_selected[bid*4+1] = dev_beamdata[bid*datalen+ii];
}
if(dev_beamdata[bid*datalen+ii] > maxval)
{
dev_selected[bid*4+2] = (DIRECTARRIVENUM*TL+ii) *1.0 / FS * c / 2;
//dev_selected[bid*3+2] = ii;
dev_selected[bid*4+3] = dev_beamdata[bid*datalen+ii];
maxval = dev_beamdata[bid*datalen+ii];
}
}
if(bid == 4)
{
printf("%d:%.1f,%.1f,%.1f,%.1f\n",bid,dev_selected[bid*4+0],dev_selected[bid*4+1],dev_selected[bid*4+2],dev_selected[bid*4+3]);
}
}
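// Envelope of the matched-filter output: magnitude of the analytic signal formed from the delayed
// data and its (IFFT-scaled) Hilbert transform.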
__global__ void Envelope(hipfftReal *dev_envelopedata, hipfftReal *dev_delayfilterout, hipfftReal *dev_hilbout) //
{
int bid=blockIdx.x;
float xx=dev_delayfilterout[bid];
float yy=dev_hilbout[bid]/FRAMELEN;//
dev_envelopedata[bid]=sqrt(xx*xx+yy*yy);
}
void *ReadBoard0Data(void *lParam){
int fileindex = 0;
std::string FilePath = "/home/ubuntu/Documents/Active/"; //
std::string FileNamePre = "Board0_ADC_";
std::string FileIdx = std::to_string(fileindex);
std::string FileNameSur = ".bin";
std::string FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
int DataFileNum = 1;
FILE *fp = NULL;
int readbuf[TL*CHANNUM+3];
int CounterA = FRAMELEN;
int temp = 0;
bool foundpulse = false;
//QueryPerformanceFrequency(&nFreq);
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
//QueryPerformanceCounter(&nBeginTime);
//read one data packet at a time: 17 samples * 16 channels, 24-bit integers stored as int
for(int ii=0;ii<DataFileNum;ii++)
{
fileindex = ii;
FileIdx = std::to_string(fileindex);
FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
if(fp != NULL)
{
fclose(fp);
fp = NULL;
}
fp = fopen(FileName.c_str(),"rb");
int num=0;
for(int jj=0;jj<8e4;jj++)//
{
usleep(TL*1e6 / FS);//wait
fread(readbuf,sizeof(int),TL*CHANNUM+3,fp);
//
if(!foundpulse)
{
float dataval = 0.0;
for(int kk=0;kk<TL;kk++)
{
temp = readbuf[3+kk*CHANNUM+1];//channel 2
temp = temp<<8;
temp = temp>>8;
dataval = temp*1.0/pow(2.0,23) * 2.5;
if(fabs(dataval) > 0.5)
{
foundpulse = true;
break;
}
}
}
if(foundpulse && num++ > DIRECTARRIVENUM) //skip the direct arrival: 510 samples, 510/17 = 30 packets
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbuf+3,TL*CHANNUM*sizeof(int));
CounterA = CounterA-TL;
if(CounterA == 0)
{
//
pthread_mutex_lock(&count_lock_Board0DataReady);
pthread_cond_signal(&cond_Board0DataReady);
count_Board0DataReady = count_Board0DataReady+1;
pthread_mutex_unlock(&count_lock_Board0DataReady);
foundpulse = false;
CounterA = FRAMELEN;
num=0;
printf("readboard0data.\n");
}
}
}
}
return NULL;
}
void *ActiveDataFormatting(void *lParam)
{
int temp = 0;
// int index=0;
// FILE *fp=NULL;
if(ChannDataBufA != NULL)
{
free(ChannDataBufA);
ChannDataBufA = NULL;
}
ChannDataBufA = (float *)malloc(FRAMELEN*CHANNUM*sizeof(float));
memset(ChannDataBufA,0,FRAMELEN*CHANNUM*sizeof(float));
if(ChannDataBuf != NULL)
{
free(ChannDataBuf);
ChannDataBuf = NULL;
}
ChannDataBuf = (float *)malloc(FRAMELEN*ELENUM*sizeof(float));
memset(ChannDataBuf,0,FRAMELEN*ELENUM*sizeof(float));
while(1)
{
pthread_mutex_lock(&count_lock_Board0DataReady);
while (count_Board0DataReady == 0)
{
pthread_cond_wait(&cond_Board0DataReady,&count_lock_Board0DataReady);
}
count_Board0DataReady = count_Board0DataReady -1;
pthread_mutex_unlock(&count_lock_Board0DataReady);
printf("dataformatting begin\n");
for(int ii=0;ii<CHANNUM;ii++)
{
for(int jj=0;jj<FRAMELEN;jj++)
{
temp = DataBufA_B1[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
if(ii==1 || ii==6 || ii==7 || ii==9 || ii==11)
{
ChannDataBufA[ii*FRAMELEN+jj] = -temp*1.0/pow(2.0,23) * 2.5;
}
else
{
ChannDataBufA[ii*FRAMELEN+jj] = temp*1.0/pow(2.0,23) * 2.5;
}
}
}
//4
memcpy(ChannDataBuf,ChannDataBufA,sizeof(float)*ELENUM*FRAMELEN);
pthread_mutex_lock(&count_lock_ActiveFrameDataReady);
pthread_cond_signal(&cond_ActiveFrameDataReady);
count_ActiveFrameDataReady = count_ActiveFrameDataReady+1;
pthread_mutex_unlock(&count_lock_ActiveFrameDataReady);
// std::string fname="/home/ubuntu/Documents/Active/tmp/formdata"+std::to_string(index++)+".bin";
// fp=fopen(fname.c_str(),"wb");
// fwrite(ChannDataBuf,sizeof(float),12*FRAMELEN,fp);
// fclose(fp);
// fp=NULL;
printf("dataformatting end\n");
}
}
void InitProcessing(){
//-----------------------------------Init();-------------------------------
//
for(int ii=0;ii<NREF;ii++)
{
float t=1.0*ii/FS;
rsRef[NREF-1-ii]=sin(2*PI*(90e3*t+0.5e7*t*t));
}
//
for(int jj=0;jj<6;jj++)
{
xEle[jj]=23e-3*sin(jj*PI/3);
zEle[jj]=23e-3*cos(jj*PI/3);
}
for(int jj=0;jj<6;jj++)
{
xEle[6+jj]=11.5e-3*sin(jj*PI/3);
zEle[6+jj]=11.5e-3*cos(jj*PI/3);
}
//
for(int ii=0;ii<3;ii++)
{
for(int jj=0;jj<9;jj++)
{
for (int kk=0;kk<12;kk++)
{
dTime[(ii*9+jj)*12+kk] = (xEle[kk]*sin(theta[ii])*cos(phi[jj])+zEle[kk]*cos(theta[ii]))/UWC*FS;
}
}
}
//---------------------------------------------------Init finished-----------------------------
}
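// Main GPU processing thread: band-pass filters each element in the frequency domain, forms
// 3 x 9 delay-and-sum beams, matched-filters each beam with the reference chirp, takes the
// envelope via a Hilbert transform, and screens the beams for echo peaks.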
void *ActiveSignalProcessing(void *lParam)
{
float temp[27*FRAMELEN]={0.0};
InitProcessing();
int FrameNum = 0;
int fIndex=0;
FILE * wfp=NULL;
std::string FileName="";
//------------------------------------------------
float h[FIRORDER+1] = {0.0};
float fl = 80e3f,fh = 120e3f;
hipError_t cudaStatus;
hipfftReal *dev_x=NULL; //raw data for the 12 channels
hipfftReal *dev_x_s=NULL; //single-channel raw data (zero-padded later)
hipfftReal *dev_h=NULL; //filter coefficients
hipfftComplex *dev_fft_x=NULL; //FFT of the 12-channel raw data
hipfftComplex *dev_fft_h=NULL; //FFT of the filter coefficients
hipfftComplex *dev_fft_y=NULL; //FFT of the filter output
hipfftReal *dev_y=NULL; //filter output, time domain at the original sampling rate
hipfftReal *dev_chanbuff=NULL; //data buffer in device memory
//float *FilteredDataout = NULL;
// float *DownSamplingData = NULL;
hipfftHandle Hplan; //FFT of the filter coefficients
hipfftHandle Xplan; //FFT of the channel raw data
hipfftHandle Yplan; //FFT of the filtered channel data
hipfftHandle HXplan; //FFT of the Hilbert input data
hipfftHandle HYplan; //FFT of the Hilbert-filtered data
hipfftHandle MXplan;
hipfftHandle MHplan; //matched-filter coefficients
hipfftHandle MYplan;
//_Longlong FiltDataFileIndex=0;
//----------------------------------------------------------------
//------------------------------------------------
hipfftReal *dev_dTime=NULL;
hipfftReal *dev_mat=NULL;//
hipfftComplex *dev_fft_mat=NULL;//
float *beamdata=NULL;
beamdata= (float *)malloc(FRAMELEN*PHINUM*THETANUM*sizeof(float));
memset(beamdata,0,FRAMELEN*PHINUM*THETANUM*sizeof(float));
hipfftReal *dev_delayfilterbuf=NULL; //
hipfftReal *dev_delayfilterout=NULL; //
hipfftReal *dev_delayFilter=NULL; //
int *dev_dI=NULL; //
hipfftReal *dev_dF=NULL; //
hipfftReal *dev_delaydata=NULL; //
hipfftReal *dev_matchdata=NULL; //
hipfftReal *dev_beamdata=NULL;
hipfftComplex *dev_fft_delaydata=NULL; //
hipfftComplex *dev_fft_matout=NULL; //
int *dev_peak = NULL; //
int *dev_valley = NULL; //
hipfftReal *dev_preselected = NULL; //
hipfftReal *dev_selected = NULL; //
int *peak = NULL;
int *valley = NULL;
//-----------------Hilbert-------------------------------
hipfftComplex *dev_matchdatafreq=NULL; //
cudaStatus = hipMalloc((void **)&dev_matchdatafreq, sizeof(hipfftComplex)*NHILBT);
hipMemset((void **)&dev_matchdatafreq,0,sizeof(hipfftComplex)*NHILBT);
hipfftComplex *dev_hilboutfreq=NULL; //
cudaStatus = hipMalloc((void **)&dev_hilboutfreq, sizeof(hipfftComplex)*NHILBT);
hipMemset((void **)&dev_hilboutfreq,0,sizeof(hipfftComplex)*NHILBT);
hipfftReal *dev_hilbout=NULL; //
cudaStatus = hipMalloc((void **)&dev_hilbout, sizeof(hipfftReal)*FRAMELEN);
hipMemset((void **)&dev_hilbout,0,sizeof(hipfftReal)*FRAMELEN);
hipfftReal *dev_envelopedata=NULL;//
cudaStatus = hipMalloc((void **)&dev_envelopedata, sizeof(hipfftReal)*FRAMELEN);
hipMemset((void **)&dev_envelopedata,0,sizeof(hipfftReal)*FRAMELEN);
//----------------------------------------------------------------
//----------------------------------------------------
hipfftPlan1d(&Hplan, NFIR, HIPFFT_R2C, 1);
hipfftPlan1d(&Xplan, NFIR, HIPFFT_R2C, 1);
hipfftPlan1d(&Yplan, NFIR, HIPFFT_C2R, 1);
hipfftPlan1d(&HXplan, NHILBT, HIPFFT_R2C, 1);
hipfftPlan1d(&HYplan, FRAMELEN, HIPFFT_C2R, 1);
hipfftPlan1d(&MXplan, NMAT, HIPFFT_R2C, 1);
hipfftPlan1d(&MHplan, NMAT, HIPFFT_R2C, 1);
hipfftPlan1d(&MYplan, NMAT, HIPFFT_C2R, 1);
cudaStatus = hipMalloc((void **)&dev_dTime, sizeof(hipfftReal)*(PHINUM*THETANUM*ELENUM));//
if (cudaStatus != hipSuccess)
{
printf (" dev_dTime hipMalloc Error! \n ");
}
hipMemset((void **)&dev_dTime,0,sizeof(hipfftReal)*(PHINUM*THETANUM*ELENUM));
hipMemcpy(dev_dTime,dTime,sizeof(hipfftReal)*(PHINUM*THETANUM*ELENUM),hipMemcpyHostToDevice);
cudaStatus = hipMalloc((void **)&dev_mat, sizeof(hipfftReal)*NMAT);//:
if (cudaStatus != hipSuccess)
{
printf (" dev_mat hipMalloc Error! \n ");
}
hipMemset((void **)&dev_mat,0,sizeof(hipfftReal)*NMAT);
hipMemcpy(dev_mat+NMAT-NREF,rsRef,sizeof(hipfftReal)*NREF,hipMemcpyHostToDevice);//
cudaStatus = hipMalloc((void **)&dev_fft_mat, sizeof(hipfftComplex)*NMAT);//
if (cudaStatus != hipSuccess)
{
printf (" dev_fft_mat hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_mat,0,sizeof(hipfftComplex)*NMAT);
hipfftExecR2C(MHplan,(hipfftReal *)&dev_mat[0],(hipfftComplex *)&dev_fft_mat[0]);
cudaStatus = hipMalloc((void **)&dev_fft_matout, sizeof(hipfftComplex)*NMAT);//
if (cudaStatus != hipSuccess)
{
printf (" dev_fft_matout hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_matout,0,sizeof(hipfftComplex)*NMAT);
cudaStatus = hipMalloc((void **)&dev_x, sizeof(hipfftReal)*(FRAMELEN*ELENUM));//
if (cudaStatus != hipSuccess)
{
printf (" dev_x hipMalloc Error! \n ");
}
hipMemset((void **)&dev_x,0,sizeof(hipfftReal)*FRAMELEN*ELENUM);
cudaStatus = hipMalloc((void **)&dev_x_s, sizeof(hipfftReal)*NFIR);//
if (cudaStatus != hipSuccess)
{
printf (" dev_x_s hipMalloc Error! \n ");
}
hipMemset((void **)&dev_x_s,0,sizeof(hipfftReal)*NFIR);
cudaStatus = hipMalloc((void **)&dev_h, sizeof(hipfftReal)*NFIR);//
if (cudaStatus != hipSuccess)
{
printf ("dev_h hipMalloc Error! \n ");
}
hipMemset((void **)&dev_h,0,sizeof(hipfftReal)*NFIR);
cudaStatus = hipMalloc((void **)&dev_y, sizeof(hipfftReal)*NFIR*ELENUM);//
if (cudaStatus != hipSuccess)
{
printf ("dev_y hipMalloc Error! \n ");
}
hipMemset((void **)&dev_y,0,sizeof(hipfftReal)*NFIR*ELENUM);
cudaStatus = hipMalloc((void **)&dev_fft_x,sizeof(hipfftComplex)*NFIR*ELENUM);//
if (cudaStatus != hipSuccess)
{
printf ("dev_fft_x hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_x,0,sizeof(hipfftComplex)*NFIR*ELENUM);
cudaStatus = hipMalloc((void **)&dev_fft_h,sizeof(hipfftComplex)*NFIR);//
if (cudaStatus != hipSuccess)
{
printf ("dev_fft_h hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_h,0,sizeof(hipfftComplex)*NFIR);
cudaStatus = hipMalloc((void **)&dev_fft_y,sizeof(hipfftComplex)*(ELENUM*NFIR));//
if (cudaStatus != hipSuccess)
{
printf ("dev_fft_y hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_y,0,sizeof(hipfftComplex)*(ELENUM*NFIR));
cudaStatus = hipMalloc((void **)&dev_chanbuff,sizeof(hipfftReal)*FILTER_FRAME*ELENUM);//
if (cudaStatus != hipSuccess)
{
printf ("dev_chanbuff hipMalloc Error! \n ");
}
hipMemset((void **)&dev_chanbuff,0,sizeof(hipfftReal)*FILTER_FRAME*ELENUM);
fir1(FIRORDER,3,fl,fh,FS,5,h);
hipMemcpy(dev_h,h,sizeof(hipfftReal)*FIRORDER,hipMemcpyHostToDevice);
hipfftExecR2C(Hplan,(hipfftReal *)&dev_h[0],(hipfftComplex *)&dev_fft_h[0]);//dev_fft_h
//---------------------------------------------------------------
cudaStatus =hipMalloc((void **)&dev_delayfilterbuf,sizeof(hipfftReal)*FRAMELEN*ELENUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_delayfilterbuf hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delayfilterbuf,0,sizeof(hipfftReal)*FRAMELEN*ELENUM);
cudaStatus =hipMalloc((void **)&dev_delayfilterout,sizeof(hipfftReal)*FRAMELEN*ELENUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_delayfilterout hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delayfilterout,0,sizeof(hipfftReal)*FRAMELEN*ELENUM);
cudaStatus =hipMalloc((void **)&dev_delayFilter,sizeof(hipfftReal)*(2*M+1)*ELENUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_delayFilter hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delayFilter,0,sizeof(hipfftReal)*(2*M+1)*ELENUM);
cudaStatus =hipMalloc((void **)&dev_dI,sizeof(int)*ELENUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_dI hipMalloc Error! \n ");
}
hipMemset((void **)&dev_dI,0,sizeof(int)*ELENUM);
cudaStatus =hipMalloc((void **)&dev_dF,sizeof(hipfftReal)*ELENUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_dF hipMalloc Error! \n ");
}
hipMemset((void **)&dev_dF,0,sizeof(hipfftReal)*ELENUM);
cudaStatus =hipMalloc((void **)&dev_delaydata,sizeof(hipfftReal)*FRAMELEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_delaydata hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delaydata,0,sizeof(hipfftReal)*FRAMELEN);
cudaStatus =hipMalloc((void **)&dev_matchdata,sizeof(hipfftReal)*NMAT);
if (cudaStatus != hipSuccess)
{
printf ("dev_matchdata hipMalloc Error! \n ");
}
hipMemset((void **)&dev_matchdata,0,sizeof(hipfftReal)*NMAT);
cudaStatus =hipMalloc((void **)&dev_beamdata,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_beamdata hipMalloc Error! \n ");
}
hipMemset((void **)&dev_beamdata,0,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = hipMalloc((void **)&dev_fft_delaydata,sizeof(hipfftComplex)*NMAT);//
if (cudaStatus != hipSuccess)
{
printf ("dev_fft_delaydata hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_delaydata,0,sizeof(hipfftComplex)*NMAT);
cudaStatus = hipMalloc((void **)&dev_peak,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_peak hipMalloc Error! \n ");
}
hipMemset((void **)&dev_peak,0,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = hipMalloc((void **)&dev_valley,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_valley hipMalloc Error! \n ");
}
hipMemset((void **)&dev_valley,0,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = hipMalloc((void **)&dev_preselected,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_preselected hipMalloc Error! \n ");
}
hipMemset((void **)&dev_preselected,0,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = hipMalloc((void **)&dev_selected,sizeof(hipfftReal)*THETANUM*PHINUM*4);
if (cudaStatus != hipSuccess)
{
printf ("dev_selected hipMalloc Error! \n ");
}
hipMemset((void **)&dev_selected,0,sizeof(hipfftReal)*THETANUM*PHINUM*4);
peak = (int *)malloc(THETANUM*PHINUM*FRAMELEN*sizeof(int));
memset(peak,0,THETANUM*PHINUM*FRAMELEN*sizeof(int));
valley = (int *)malloc(THETANUM*PHINUM*FRAMELEN*sizeof(int));
memset(valley,0,THETANUM*PHINUM*FRAMELEN*sizeof(int));
//----------------------------------------------------
//------------------------------------------------------
hipEvent_t start1;
hipEvent_t stop1;
float msecTotal = 0.0f;
hipEventCreate(&start1);
hipEventCreate(&stop1);
//----------------------------------------------------------------
while(1)
{
FileName="/home/ubuntu/Documents/Active/tmp/beamdata"+std::to_string(fIndex++)+".bin";
wfp=fopen(FileName.c_str(),"wb");
hipEventRecord(start1,NULL);
printf("wait for process\n");
pthread_mutex_lock(&count_lock_ActiveFrameDataReady);
while (count_ActiveFrameDataReady == 0)
{
pthread_cond_wait(&cond_ActiveFrameDataReady,&count_lock_ActiveFrameDataReady);
}
count_ActiveFrameDataReady = count_ActiveFrameDataReady -1;
pthread_mutex_unlock(&count_lock_ActiveFrameDataReady);
FrameNum++;
hipMemcpy(dev_x,ChannDataBuf,sizeof(hipfftReal)*FRAMELEN*ELENUM,hipMemcpyHostToDevice);//ChannDataBufdev_x
//-----------------------------------------(1) (2.4ms)---------------------------------------------------
//
for(int jj=0;jj<ELENUM;jj++)
{
hipMemcpy(dev_x_s,dev_x+FRAMELEN*jj,sizeof(hipfftReal)*FRAMELEN,hipMemcpyDeviceToDevice);//dev_xjjdev_x_s
hipfftExecR2C(Xplan,(hipfftReal *)&dev_x_s[0],(hipfftComplex *)&dev_fft_x[jj*NFIR]);//
}
//
//
hipLaunchKernelGGL(( ActiveFilter), dim3(ELENUM*NFIR/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_fft_x,dev_fft_h,dev_fft_y,NFIR);
//
for(int jj=0;jj<ELENUM;jj++)
{
hipfftExecC2R(Yplan,dev_fft_y+jj*NFIR,dev_y+jj*NFIR);
hipMemcpy((float*)&dev_chanbuff[jj*FILTER_FRAME],(hipfftReal*)&dev_y[jj*NFIR+FIRORDER/2],sizeof(float)*FILTER_FRAME,hipMemcpyDeviceToDevice);
}
//QueryPerformanceCounter(&nEndTime);
//hipEventRecord(stop1,NULL);
//hipEventSynchronize(stop1);
//-----------------------------------------(1) ---------------------------------------------------
//-----------------------------------------(2) (223ms)---------------------------------------------------
//hipEventRecord(start1,NULL);
//
for(int ii=0;ii<THETANUM;ii++)
{//
for(int jj=0;jj<PHINUM;jj++)
{//
int index=(ii*9+jj)*12;//
hipLaunchKernelGGL(( ActiveDelayFilterGen), dim3(ELENUM),dim3(2*M+1), 0, 0, dev_delayFilter,dev_dI,dev_dF,dev_dTime,index);
for (int kk=0;kk<12;kk++)
{
int DI=(int)dTime[index+kk];//
float DF=dTime[index+kk];
if(DI>=0)
{
hipMemcpy(dev_delayfilterbuf+kk*FRAMELEN+DI,dev_chanbuff+kk*FRAMELEN,sizeof(hipfftReal)*(FRAMELEN-DI),hipMemcpyDeviceToDevice);
}
else
{
hipMemcpy(dev_delayfilterbuf+kk*FRAMELEN,dev_chanbuff+kk*FRAMELEN-DI,sizeof(hipfftReal)*(FRAMELEN+DI),hipMemcpyDeviceToDevice);
}
if(DF > 0.0001)
{
hipLaunchKernelGGL(( ActiveFineDelayFilter), dim3(FRAMELEN),dim3(2*M+1), 0, 0, dev_delayfilterbuf+kk*FRAMELEN,dev_delayfilterout+kk*FRAMELEN,dev_delayFilter+kk*(2*M+1));
//hipMemcpy(dev_delayfilterout+kk*FRAMELEN,dev_delayfilterbuf+kk*FRAMELEN,sizeof(hipfftReal)*FRAMELEN,hipMemcpyDeviceToDevice);
}
else
{
hipMemcpy(dev_delayfilterout+kk*FRAMELEN,dev_delayfilterbuf+kk*FRAMELEN,sizeof(hipfftReal)*FRAMELEN,hipMemcpyDeviceToDevice);
}
}
hipLaunchKernelGGL(( MatrixSumRow), dim3(FRAMELEN),dim3(1), 0, 0, dev_delayfilterout,dev_delaydata,ELENUM,FRAMELEN);
//
//==========================================
hipfftExecR2C(MXplan,dev_delaydata,dev_fft_delaydata);
hipLaunchKernelGGL(( VectorMultiplier), dim3(NMAT),dim3(1), 0, 0, dev_fft_delaydata,dev_fft_mat,dev_fft_matout);
hipfftExecC2R(MYplan,dev_fft_matout,dev_matchdata);
//Hilbert(0.5ms)
//1. dev_matchdata -> dev_matchdatafreq
hipfftExecR2C(HXplan,dev_matchdata,dev_matchdatafreq);
//2. dev_matchdatafreq -> dev_hilboutfreq
hipLaunchKernelGGL(( HilbFilt), dim3(NHILBT),dim3(1), 0, 0, dev_hilboutfreq, dev_matchdatafreq, NHILBT/2);
//3. dev_hilboutfreq -> dev_hilbout
hipfftExecC2R(HYplan,dev_hilboutfreq,dev_hilbout);
//4. dev_matchdata,dev_hilbout ->dev_envelopedata
hipLaunchKernelGGL(( Envelope), dim3(FRAMELEN),dim3(1), 0, 0, dev_envelopedata,dev_matchdata, dev_hilbout);
// dev_beamdata
hipMemcpy((hipfftReal *)&dev_beamdata[(ii*9+jj)*FRAMELEN],dev_envelopedata,sizeof(hipfftReal)*FRAMELEN,hipMemcpyDeviceToDevice);
}
}
hipMemset((void **)&dev_peak,0,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN);
hipMemset((void **)&dev_valley,0,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN);
hipLaunchKernelGGL(( DevFindPeak), dim3(PHINUM*THETANUM),dim3(1), 0, 0, dev_beamdata,dev_peak,FRAMELEN);
hipMemcpy(peak,dev_peak,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN,hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( DevFindValley), dim3(PHINUM*THETANUM),dim3(1), 0, 0, dev_beamdata,dev_valley,FRAMELEN);
hipMemcpy(valley,dev_valley,sizeof(hipfftReal)*THETANUM*PHINUM*FRAMELEN,hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( DevPeakDetection), dim3(PHINUM*THETANUM),dim3(1), 0, 0, dev_peak,dev_valley,dev_beamdata,dev_preselected,dev_selected,FRAMELEN,3.0,10000);
hipEventRecord(stop1,NULL);
hipEventSynchronize(stop1);
hipMemcpy(beamdata,dev_beamdata,sizeof(hipfftReal)*6800*27,hipMemcpyDeviceToHost);//THETANUM*PHINUM*FRAMELEN
hipEventElapsedTime(&msecTotal,start1,stop1);
hipMemcpy(temp,dev_beamdata,sizeof(hipfftReal)*6800*27,hipMemcpyDeviceToHost);
fwrite(temp,sizeof(float),6800*27,wfp);
//printf("%d:%.3f:\n",FrameNum,msecTotal);
printf("%d:%.3f:\n",FrameNum,msecTotal);
printf("processing finished.\n");
fclose(wfp);
wfp=NULL;
//-----------------------------------------(2) -----------------------------------------------
}
}
void *ActiveReceiveNetwork(void *lParam)
{
char errBuf[PCAP_ERRBUF_SIZE], *device;
pcap_t *handle;
bpf_u_int32 mask;
bpf_u_int32 net;
struct bpf_program filter;
char filter_app[] = "udp dst port 0"; //setting the filter package
struct pcap_pkthdr packet;
const u_char *pktStr;
char packtype = 0;
short portnumber = 0;
char sourceid = 0;
char FramenumN1 = -1, FramenumN2 = -1;
char LastFramenumN1 = 0, LastFramenumN2 = 0;
int readbufb1[TL*CHANNUM+1],readbufb2[TL*CHANNUM+1];
int BUF_FLAG_B1=0,BUF_FLAG_B2;
int *pBuf_B1 = NULL,*pBuf_B2 = NULL;
int *pCounter_B1 = NULL,*pCounter_B2 = NULL;
int CounterA_B1 = FRAMELEN;
int CounterA=FRAMELEN;
int temp = 0;
int FrameNum1 = 0,FrameNum2 = 0, FrameNum = 0;
bool foundpulse = false;
int num=0;
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
//get the name of the first device suitable for capture
device = pcap_lookupdev(errBuf);
if ( device )
{
printf("success: device: %s\n",device);
}
else
{
printf("error: %s\n",errBuf);
return 0;
}
//open network device for packet capture
handle = pcap_open_live(device,BUFSIZ,1,0,errBuf);
//look up into from the capture device
pcap_lookupnet(device,&net,&mask,errBuf);
printf("net=%x mask=%x\n",net,mask);
//compiles the filter expression into a bpf filter program
printf("compiles the filter expression into a bpf filter program\r\n");
pcap_compile(handle,&filter,filter_app,0,net);
//load the filter program into the packet capture device
printf("load the filter program into the packet capture device\r\n");
pcap_setfilter(handle,&filter);
while (1)
{
//printf("before Received data!\n");
pktStr = pcap_next(handle,&packet);
//printf("Received data!\n");
if(pktStr != NULL)
{
//printf("Received data!\n");
//
memcpy((char *)&portnumber,pktStr+37,sizeof(char));
memcpy((char *)&portnumber+1,pktStr+36,sizeof(char));
if (portnumber == DEST_PORT)
{
//
memcpy(&packtype,pktStr+45,sizeof(char));
memcpy(&sourceid,pktStr+43,sizeof(char));
if (packtype == 0x10) // if packet is ADC packet
{
if(sourceid == 0)
{
FrameNum1++;
memcpy(readbufb1,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
FramenumN1 = *(pktStr+44);
FramenumN1 = FramenumN1 >> 2;
if (FrameNum1 == 1)
{
LastFramenumN1 = FramenumN1;
}
else
{
if (FramenumN1 != LastFramenumN1+1 && FramenumN1+63 != LastFramenumN1)
{
printf("Lost Board1 data package!\n");
}
LastFramenumN1 = FramenumN1;
}
}
// if(sourceid == 2)
// {
// FrameNum2++;
// memcpy(readbufb2,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
// FramenumN2 = *(pktStr+44);
// FramenumN2 = FramenumN2 >> 2;
// if (FrameNum2 == 1)
// {
// LastFramenumN2 = FramenumN2;
// }
// else
// {
// if (FramenumN2 != LastFramenumN2+1 && FramenumN2+63 != LastFramenumN2)
// {
// printf("Lost Board2 data package!\n");
// }
// LastFramenumN2 = FramenumN2;
// }
// }
//
if(!foundpulse)
{
float dataval = 0.0;
for(int kk=0;kk<TL;kk++)
{
temp = readbufb1[3+kk*CHANNUM+1];//channel 2
temp = temp<<8;
temp = temp>>8;
dataval = temp*1.0/pow(2.0,23) * 2.5;
if(fabs(dataval) > 0.5)
{
foundpulse = true;
break;
}
}
}
if(foundpulse && num++ > DIRECTARRIVENUM) //skip the direct arrival: 510 samples, 510/17 = 30 packets
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbufb1+3,TL*CHANNUM*sizeof(int));
CounterA = CounterA-TL;
if(CounterA == 0)
{
//
pthread_mutex_lock(&count_lock_Board0DataReady);
pthread_cond_signal(&cond_Board0DataReady);
count_Board0DataReady = count_Board0DataReady+1;
pthread_mutex_unlock(&count_lock_Board0DataReady);
foundpulse = false;
CounterA = FRAMELEN;
num=0;
}
}
}
}
}
//printf("ReceiveNetworkData Finished!\n");
//pthread_mutex_lock(&count_lock_BoardDataReady);
//pthread_cond_signal(&cond_BoardDataReady);
//count_BoardDataReady = count_BoardDataReady+1;
//pthread_mutex_unlock(&count_lock_BoardDataReady);
}
}
|
9cf8c44871fb628ed650c003f48659f293eab31e.cu
|
//nvcc colliprev.cu -o test -lstdc++ -lpthread -lcufft -lpcap -std=c++11
#include <pcap.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/ip_icmp.h>
#include <net/ethernet.h>
#include <netinet/if_ether.h>
#include <netinet/ether.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <memory.h>
#include <malloc.h>
#include <iostream>
//--------------CUDA----------------
#include <cuda_runtime.h>
#include <cufft.h>
//-------------------------------------
// ----------------------------------------
#define PI 3.1415926f
#define UWC 1500.0f //
#define FS 250000 //
#define threadsPerBlock 512
#define d 0.07f
#define FL 90000.0f
#define FH 100000.0f
#define TL 17
#define CHANNUM 16
#define FRAMELEN 6800
#define DOWNSAMPLE 1
#define FIRORDER 256
#define FILTER_FRAME FRAMELEN
#define NFFT FRAMELEN //
#define BEAMNUM 91
#define THREADNUMPERBLK 200
#define ARRAYNUM 15
#define STARTBEAM 15
#define ENDBEAM 75
#define MAXTRACETARNUM 3
#define M 3
#define ONLINEMODE 0
#define FILEMODE 1
#define DEST_PORT 0
#define PSD_LEN 20
#define THETANUM 3 //number of elevation angles
#define PHINUM 9 //number of azimuth angles
#define NREF 250 //reference signal length
#define FRAMELEN 6800
#define NHILBT FRAMELEN //number of Hilbert-transform frequency bins
#define ELENUM 12 //number of array elements
#define NNZERO 200 //zero-padding length (compensates the filter delay)
#define NFIR (FILTER_FRAME+NNZERO) //FFT length used for filtering
#define NMAT FRAMELEN //matched-filter length
#define DIRECTARRIVENUM 30
// -----------------------------------------------------
void *ReadBoard0Data(void *lParam);
void *ActiveReceiveNetwork(void *lParam);
void *ActiveDataFormatting(void *lParam);
void *ActiveSignalProcessing(void *lParam);
//------------------------------------------------------
pthread_mutex_t count_lock_Board0DataReady;
pthread_mutex_t count_lock_ActiveFrameDataReady;
pthread_cond_t cond_Board0DataReady;
pthread_cond_t cond_ActiveFrameDataReady;
unsigned int count_Board0DataReady;
unsigned int count_ActiveFrameDataReady;
//-----------------------------------------------------
int *DataBufA_B1 = NULL;//16Channel
float *ChannDataBufA=NULL;//16Channel
float *ChannDataBuf=NULL;//12Channel
//---------------------------------------------------
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h);
float window(int type,int n,int i,float beta);
float kaiser(int i,int n,float beta);
float bessel0(float x);
void findpeak(float *data, int *p,int dn);
void findvalley(float *data, int *p,int dn);
bool peakdetection(int beamidx,float *be,int *valley,float threshold);
void rbub(float *p,int *idx,int n);
// -----------------------------------------------------------
float rsRef[NREF]={0.0};//time-reversed reference signal
float theta[3]={1.3963, 1.5708, 1.7453};//elevation angles: 80°-100°
float phi[9]={0.8727, 1.0472, 1.2217, 1.3963 , 1.5708, 1.7453 , 1.9199 , 2.0944 , 2.2689};//azimuth angles
float xEle[12]={0.0};//element x coordinates
float zEle[12]={0.0};//element z coordinates
float dTime[THETANUM*PHINUM*ELENUM]={0.0};//time delays
// -----------------------------------------------------------
int main(){
pthread_t t_ActiveReceiveNetworkData;
pthread_t t_ActiveDataFormatting;
pthread_t t_ActiveSignalProcessing;
pthread_t t_ReadBoard0Data;
cond_Board0DataReady = PTHREAD_COND_INITIALIZER;
cond_ActiveFrameDataReady = PTHREAD_COND_INITIALIZER;
count_lock_Board0DataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_ActiveFrameDataReady = PTHREAD_MUTEX_INITIALIZER;
pthread_create(&t_ActiveSignalProcessing,NULL,ActiveSignalProcessing,(void *)NULL);
pthread_create(&t_ActiveDataFormatting,NULL,ActiveDataFormatting,(void *)NULL);
#if ONLINEMODE
pthread_create(&t_ActiveReceiveNetworkData,NULL,ActiveReceiveNetwork,(void *)NULL);
#endif
#if FILEMODE
pthread_create(&t_ReadBoard0Data,NULL,ReadBoard0Data,(void *)NULL);
#endif
pthread_join(t_ActiveSignalProcessing, NULL);
return 0;
}
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h)
{
int i,n2,mid;
float sum = 0;
float s,wc1,wc2,beta = 0,delay;
float fln = fl / fs;
float fhn = fh / fs;
beta = 6;
if((n%2)==0)
{
n2=n/2-1;
mid=1;
}
else
{
n2=n/2;
mid=0;
}
delay=n/2.0;
wc1=2.0*PI*fln;
if(band>=3) wc2=2.0*PI*fhn;
switch(band)
{
case 1://
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)/(PI*s))*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=wc1/PI;
for(i=0;i<=n;i++)
{
sum=sum+*(h+i);
}
for(i=0;i<=n;i++)
{
*(h+i)=*(h+i)/fabs(sum);
}
break;
}
case 2: //
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(PI*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=1.0-wc1/PI;
break;
}
case 3: //
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc2*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc2-wc1)/PI;
break;
}
case 4: //
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)+sin(PI*s)-sin(wc2*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc1+PI-wc2)/PI;
break;
}
}
return 0;
}
float window(int type,int n,int i,float beta)
{
int k;
float w=1.0;
switch(type)
{
case 1: //
{
w=1.0;
break;
}
case 2: //
{
k=(n-2)/10;
if(i<=k) w=0.5*(1.0-cos(i*PI/(k+1)));
if(i>n-k-2) w=0.5*(1.0-cos((n-i-1)*PI/(k+1)));
break;
}
case 3: //
{
w=1.0-fabs(1.0-2*i/(n-1.0));
break;
}
case 4: //
{
w=0.5*(1.0-cos(2*i*PI/(n-1.0)));
break;
}
case 5: //
{
w=0.54-0.46*cos(2*i*PI/(n-1.0));
break;
}
case 6: //
{
w=0.42-0.5*cos(2*i*PI/(n-1.0))+0.08*cos(4*i*PI/(n-1.0));
break;
}
case 7: //
{
w=kaiser(i,n,beta);
break;
}
}
return(w);
}
float kaiser(int i,int n,float beta) //
{
float a,w,a2,b1,b2,beta1;
b1=bessel0(beta);
a=2.0*i/(float)(n-1)-1.0;
a2=a*a;
beta1=beta*sqrt(1.0-a2);
b2=bessel0(beta1);
w=b2/b1;
return(w);
}
float bessel0(float x) //
{
int i;
float dd,y,d2,sum = 0;
y=x/2.0;
dd=1.0;
for(i=1;i<=25;i++)
{
dd=dd*y/i;
d2=dd*dd;
sum=sum+d2;
if(d2<sum*(1.0e-8)) break;
}
return(sum);
}
__global__ void PhiShiftFactorGen(cufftComplex *XNSS)
{
int bid = 0,tid = 0;
float tt = 0.0f;
float angle=0.0f;
float det[ARRAYNUM];
float MovePoints[ARRAYNUM];
bid = blockIdx.x;
tid = threadIdx.x;
angle=float(tid*PI/(BEAMNUM-1));
for(int i=0;i<ARRAYNUM;i++)
{
det[i]=i*d*cos(angle)/UWC;
MovePoints[i]=det[i]*FS/DOWNSAMPLE;
tt=MovePoints[i]*2*PI*bid/NFFT;
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].x = cos(tt);
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].y = sin(tt);
}
}
void findpeak(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
//
for(j=1;j<11;j++)
{
if ((i+j)>=dn)
{
a1=*(data+i+j-dn);
}
else
{
a1=*(data+i+j);
}
if (a0>a1)
{
acc=acc+1;
}
}
a0=*(data+i);
//
for(j=1;j<11;j++)
{
if ((i-j)<0)
{
a1=*(data+i-j+dn);
}
else
{
a1=*(data+i-j);
}
if (a0>a1)
{
acc1=acc1+1;
}
}
if ((acc==10) && (acc1==10))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
void findvalley(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
//
for(j=1;j<6;j++)
{
if ((i+j)>=dn)
{
break;
}
else
{
a1=*(data+i+j);
}
if (a0<a1)
{
acc=acc+1;
}
}
if(j<5) //
{
acc = 5;
}
a0=*(data+i);
//
for(j=1;j<6;j++)
{
if ((i-j)<0)
{
break;
}
else
{
a1=*(data+i-j);
}
if (a0<a1)
{
acc1=acc1+1;
}
}
if(j<5) //
{
acc1 = 5;
}
if ((acc==5) && (acc1==5))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
bool peakdetection(int beamidx,float *be,int *valley,float threshold)
{
int index = 0,ll=0;
float pvr1 = 1.0,pvr2 = 1.0;
if(beamidx >= STARTBEAM && beamidx <= ENDBEAM)
{
for(ll=beamidx+1;ll<BEAMNUM;ll++)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll<=BEAMNUM-1)
{
pvr1 = be[beamidx] / be[index];
}
for(ll=beamidx-1;ll>=0;ll--)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll>=0)
{
pvr2 = be[beamidx] / be[index];
}
if(pvr1 >= threshold && pvr2 >= threshold)
{
return true;
}
else
{
return false;
}
}
else
{
return false;
}
}
void rbub(float *p,int *idx,int n)
{
int m,k,j,i,xx;
float dd;
k=0;
m=n-1;
while (k<m)
{
j=m-1; m=0;
for(i=k; i<=j; i++)
{
if(p[i]<p[i+1])
{
dd=p[i];
p[i]=p[i+1];
p[i+1]=dd;
xx = idx[i];
idx[i] = idx[i+1];
idx[i+1] = xx;
m=i;
}
}
j=k+1;
k=0;
for (i=m; i>=j; i--)
{
if(p[i-1]<p[i])
{
dd=p[i];
p[i]=p[i-1];
p[i-1]=dd;
xx = idx[i];
idx[i] = idx[i-1];
idx[i-1] = xx;
k=i;
}
}
}
return;
}
__global__ void FD_Beamform(cufftComplex *dev_fft,cufftReal *dev_energy,cufftComplex *PhiArray,int nfl,int nfh)
{
__shared__ float Mabs[THREADNUMPERBLK];
float tempX=0.0f;
float tempY=0.0f;
cuComplex XNSS;
cuComplex XFFTafterPinYi;
float ax = 0.0f,ay=0.0f,bx=0.0f,by=0.0f;
float energyEachBoShu = 0.0f;
int bid = 0,tid = 0;
int beamidx = 0, freqidx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
beamidx = bid % BEAMNUM;
freqidx = bid / BEAMNUM*THREADNUMPERBLK+tid;
if(tid==0)
{
memset(Mabs,0,sizeof(float)*THREADNUMPERBLK);
}
__syncthreads();
//
tempX=0.0;
tempY=0.0;
for(int i=0;i<ARRAYNUM;i++)
{
XNSS.x=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].x;
XNSS.y=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].y;
ax=dev_fft[i*(NFFT/2+1)+freqidx].x;
ay=dev_fft[i*(NFFT/2+1)+freqidx].y;
bx=XNSS.x;
by=XNSS.y;
if (freqidx>= nfl && freqidx<=nfh)
{
XFFTafterPinYi.x=ax*bx-ay*by;
XFFTafterPinYi.y=ax*by+bx*ay;
}
else
{
XFFTafterPinYi.x=0;
XFFTafterPinYi.y=0;
}
tempX=tempX+ XFFTafterPinYi.x;
tempY=tempY+ XFFTafterPinYi.y;
}
Mabs[tid]=tempX*tempX+tempY*tempY;
//
__syncthreads();
//
if(tid==0)
{
energyEachBoShu=0.0f;
for(int k=0;k<THREADNUMPERBLK;k++)
{
energyEachBoShu=energyEachBoShu+Mabs[k];
}
dev_energy[bid]= energyEachBoShu;
}
}
__global__ void MatrixSumRow(cufftReal *dev_energy,cufftReal *sum_energy,int nrow,int ncol)
{
int bid = 0;
int row = 0,col = 0;
float sum = 0.0;
bid = blockIdx.x;
row = nrow;
col = ncol;
for(int ii = 0;ii<row;ii++)
{
sum = sum+dev_energy[ii*col+bid];
}
sum_energy[bid] = sum;
}
__global__ void DownSamplingFilter(cufftComplex *dev_fft_sig,cufftComplex *dev_fft_filter,cufftComplex *dev_fft_yk,int FFTN)//needchange
{
int bid = 0,tid = 0;
cuComplex Sigk;
cuComplex Hk;
int chanIdx = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (CHANNUM*2);
freqIdx = bid / (CHANNUM*2)*THREADNUMPERBLK+tid;
Sigk.x = dev_fft_sig[chanIdx*FFTN+freqIdx].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+freqIdx].y;
Hk.x = dev_fft_filter[freqIdx].x;
Hk.y = dev_fft_filter[freqIdx].y;
dev_fft_yk[chanIdx*FFTN+freqIdx].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+freqIdx].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
}
__global__ void DelayFilterGen(float *h,int m,float theta,float *tau,int *dI)
{
int bid = 0,tid = 0;
int k=0;
float dfs = 0.0;
int DI = 0;
__shared__ float sum;
bid = blockIdx.x;
tid = threadIdx.x;
if(tid == 0)
{
sum = 0.0;
dfs = bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE);
DI = int(bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE)+0.5);
tau[bid] =dfs-DI;
dI[bid] = DI;
//printf("bid=%d,m=%d,theta = %.3f,dfs = %.3f,DI = %d\n",bid,m,theta,dfs,DI);
}
//
__syncthreads();
k = tid-m;
h[bid*(2*m+1)+tid] = sin(k*1.0*PI-tau[bid]*PI+0.000001)/(k*1.0*PI-tau[bid]*PI+0.000001);
//
__syncthreads();
if(tid == 0)
{
for(int k=0;k<2*m+1;k++)
{
sum = sum + h[bid*(2*m+1)+k];
}
}
__syncthreads();
h[bid*(2*m+1)+tid] = h[bid*(2*m+1)+tid]/sum;
}
__global__ void FineDelayFilter(cufftReal *dev_xin,cufftReal *dev_yout,cufftReal *delayfilter,int m)
{
int bid,tid;
float x=0.0,h=0.0;
float sum = 0.0;
bid = blockIdx.x;
tid = threadIdx.x;
__shared__ float y[2*M+1];
if(tid == 0)
{
for(int ii=0;ii<2*m+1;ii++)
{
y[ii] = 0.0;
}
}
if(bid-2*m+tid >= 0 && bid-2*m+tid < (FILTER_FRAME/DOWNSAMPLE))
{
x = dev_xin[bid-2*m+tid];
}
if(2*m-tid >=0)
{
h = delayfilter[2*m-tid];
}
y[tid] = x*h;
//if(bid == 24855)
//{
// printf("bid = %d,x=%.8f,h=%.8f,y=%.8f\n",bid,x,h,y);
//}
//
__syncthreads();
if(tid == 0)
{
sum = 0.0;
for(int jj=0;jj<2*m+1;jj++)
{
sum = sum + y[jj];
}
dev_yout[bid] = sum;
//if(bid == 24855)
//{
// printf("bid = %d,dev_yout=%.8f\n",bid,dev_yout[bid]);
//}
}
}
__global__ void Psd(cufftComplex *Xk,cufftReal *Xabs, int N)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
Xabs[freqIdx] = (Xk[freqIdx].x*Xk[freqIdx].x+Xk[freqIdx].y*Xk[freqIdx].y) / N;
}
__global__ void ActiveFilter(cufftComplex *dev_fft_sig,cufftComplex *dev_fft_filter,cufftComplex *dev_fft_yk,int FFTN)
{
int bid = 0,tid = 0;
cuComplex Sigk;
cuComplex Hk;
int chanIdx = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (ELENUM);
freqIdx = bid / ELENUM*THREADNUMPERBLK+tid;
Sigk.x = dev_fft_sig[chanIdx*FFTN+freqIdx].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+freqIdx].y;
Hk.x = dev_fft_filter[freqIdx].x;
Hk.y = dev_fft_filter[freqIdx].y;
dev_fft_yk[chanIdx*FFTN+freqIdx].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+freqIdx].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
}
__global__ void ActiveDelayFilterGen(cufftReal *h, int *dI, cufftReal *dF, float *delaytime,int index)//fractional-delay filter parameters for all channels
{
// h - filter coefficients
// dI - integer delays
// dF - fractional delays
// index - beam index
int bid = 0,tid = 0;
int k=0;
float dfs = 0.0;
int DI = 0;
__shared__ float sum;
//__shared__ float dfs;
bid = blockIdx.x;//channel index
tid = threadIdx.x;//filter coefficient index
if(tid == 0)
{
sum = 0.0;
dfs = delaytime[index+bid];//delay in samples
DI = int(dfs);//integer delay
dF[bid] =dfs-DI;//fractional delay
dI[bid] = DI;
//printf("bid=%d,m=%d,theta = %.3f,dfs = %.3f,DI = %d\n",bid,m,theta,dfs,DI);
}
//synchronize threads within the block
__syncthreads();
k = tid-M;
h[bid*(2*M+1)+tid] = sin(k*1.0*PI-dF[bid]*PI+0.000001)/(k*1.0*PI-dF[bid]*PI+0.000001);
//synchronize threads within the block
__syncthreads();
if(tid == 0)
{
for(int k=0;k<2*M+1;k++)
{
sum = sum + h[bid*(2*M+1)+k];
}
}
__syncthreads();
h[bid*(2*M+1)+tid] = h[bid*(2*M+1)+tid]/sum;
}
__global__ void ActiveFineDelayFilter(cufftReal *dev_xin,cufftReal *dev_yout,cufftReal *delayfilter)//fractional-delay filter
{
int bid,tid;
float x=0.0,h=0.0;
float sum = 0.0;
bid = blockIdx.x;//sample index
tid = threadIdx.x;//filter coefficient index
__shared__ float y[2*M+1];
if(tid == 0)
{
for(int ii=0;ii<2*M+1;ii++)
{
y[ii] = 0.0;
}
}
if(bid+tid >= M && bid+tid <= FRAMELEN+M)
{
x = dev_xin[bid-M+tid];
}
if(2*M-tid >=0)
{
h = delayfilter[2*M-tid];
}
y[tid] = x*h;
//synchronize threads within the block
__syncthreads();
if(tid == 0)
{
sum = 0.0;
for(int jj=0;jj<2*M+1;jj++)
{
sum = sum + y[jj];
}
dev_yout[bid] = sum;
}
}
__global__ void VectorMultiplier(cufftComplex *dev_in,cufftComplex *dev_h,cufftComplex *dev_out)
{
int bid=blockIdx.x;
dev_out[bid].x=dev_in[bid].x*dev_h[bid].x-dev_in[bid].y*dev_h[bid].y;
dev_out[bid].y=dev_in[bid].x*dev_h[bid].y+dev_in[bid].y*dev_h[bid].x;
}
__global__ void HilbFilt(cufftComplex *dev_hilboutfreq, cufftComplex *dev_matchdatafreq, int mid) //Hilbert transform in the frequency domain
{
int bid=blockIdx.x;
float xx=dev_matchdatafreq[bid].x;
float yy=dev_matchdatafreq[bid].y;
if(bid<=mid)
{
dev_hilboutfreq[bid].x=yy;
dev_hilboutfreq[bid].y=-xx;
}
else{
dev_hilboutfreq[bid].x=-yy;
dev_hilboutfreq[bid].y=xx;
}
}
__global__ void DevFindPeak(cufftReal *dev_beamdata,int *dev_peak,int datalen)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
int bid=blockIdx.x;
// int tid=threadIdx.x;
for(i=0;i<datalen;i++)
{
a0=*(dev_beamdata+bid*datalen+i);
//search forward first
for(j=1;j<11;j++)
{
if ((i+j)>=datalen)
{
a1=*(dev_beamdata+bid*datalen+i+j-datalen);
}
else
{
a1=*(dev_beamdata+bid*datalen+i+j);
}
if (a0>a1)
{
acc=acc+1;
}
}
a0=*(dev_beamdata+bid*datalen+i);
//then search backward
for(j=1;j<11;j++)
{
if ((i-j)<0)
{
a1=*(dev_beamdata+bid*datalen+i-j+datalen);
}
else
{
a1=*(dev_beamdata+bid*datalen+i-j);
}
if (a0>a1)
{
acc1=acc1+1;
}
}
if ((acc==10) && (acc1==10))
{
//if(bid == 0)
//{
// printf("%d:%.1f\n",i,*(dev_beamdata+bid*datalen+i));
//}
*(dev_peak+bid*datalen+i)=1;
}
acc=0;
acc1=0;
}
}
__global__ void DevFindValley(cufftReal *dev_beamdata,int *dev_valley,int datalen)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
int bid=blockIdx.x;
for(i=0;i<datalen;i++)
{
a0=*(dev_beamdata+bid*datalen+i);
//search forward first
for(j=1;j<6;j++)
{
if ((i+j)>=datalen)
{
//a1=*(data+i+j-dn);
break;
}
else
{
a1=*(dev_beamdata+bid*datalen+i+j);
}
if (a0<a1)
{
acc=acc+1;
}
}
if(j<5) //loop exited via break
{
acc = 5;
}
a0=*(dev_beamdata+bid*datalen+i);
//then search backward
for(j=1;j<6;j++)
{
if ((i-j)<0)
{
//a1=*(data+i-j+dn);
break;
}
else
{
a1=*(dev_beamdata+bid*datalen+i-j);
}
if (a0<a1)
{
acc1=acc1+1;
}
}
if(j<5) //loop exited via break
{
acc1 = 5;
}
if ((acc==5) && (acc1==5))
{
*(dev_valley+bid*datalen+i)=1;
}
acc=0;
acc1=0;
}
}
__global__ void DevPeakDetection(int *dev_peak,int *dev_valley,cufftReal *dev_beamdata,cufftReal *dev_preselected,cufftReal *dev_selected,int datalen,float threshold,float thresholdabs)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int index = 0,ll=0;
float pvr1 = 1.0,pvr2 = 1.0;
bool foundfirst = false;
float maxval = 0.0,c=1500.0;
for(int ii=1;ii<datalen-1;ii++)
{
if(dev_peak[bid*datalen+ii] ==1)
{
for(ll=ii+1;ll<datalen;ll++)
{
if(dev_valley[bid*datalen+ll] == 1)
{
index = ll;
break;
}
}
if(ll<=datalen-1)
{
pvr1 = dev_beamdata[bid*datalen+ii] / dev_beamdata[bid*datalen+index];
}
for(ll=ii-1;ll>=0;ll--)
{
if(dev_valley[bid*datalen+ll] == 1)
{
index = ll;
break;
}
}
if(ll>=0)
{
pvr2 = dev_beamdata[bid*datalen+ii] / dev_beamdata[bid*datalen+index];
}
if(pvr1 >= threshold && pvr2 >= threshold && dev_beamdata[bid*datalen+ii] > thresholdabs)
{
dev_preselected[bid*datalen+ii]=1;
}
else
{
dev_preselected[bid*datalen+ii]=0;
}
}
else
{
dev_preselected[bid*datalen+ii]=0;
}
}
//find the first peak and the maximum peak
for(int ii=0;ii<datalen-1;ii++)
{
if(dev_preselected[bid*datalen+ii] == 1 && foundfirst == false)
{
foundfirst = true;
dev_selected[bid*4+0] = (DIRECTARRIVENUM*TL+ii) *1.0 / FS * c / 2;
//dev_selected[bid*3+0] = ii;
dev_selected[bid*4+1] = dev_beamdata[bid*datalen+ii];
}
if(dev_beamdata[bid*datalen+ii] > maxval)
{
dev_selected[bid*4+2] = (DIRECTARRIVENUM*TL+ii) *1.0 / FS * c / 2;
//dev_selected[bid*3+2] = ii;
dev_selected[bid*4+3] = dev_beamdata[bid*datalen+ii];
maxval = dev_beamdata[bid*datalen+ii];
}
}
if(bid == 4)
{
printf("%d:%.1f,%.1f,%.1f,%.1f\n",bid,dev_selected[bid*4+0],dev_selected[bid*4+1],dev_selected[bid*4+2],dev_selected[bid*4+3]);
}
}
__global__ void Envelope(cufftReal *dev_envelopedata, cufftReal *dev_delayfilterout, cufftReal *dev_hilbout) //compute the envelope
{
int bid=blockIdx.x;
float xx=dev_delayfilterout[bid];
float yy=dev_hilbout[bid]/FRAMELEN;//must be normalized by the inverse-FFT length!
dev_envelopedata[bid]=sqrt(xx*xx+yy*yy);
}
void *ReadBoard0Data(void *lParam){
int fileindex = 0;
std::string FilePath = "/home/ubuntu/Documents/Active/"; //data file path, change as needed
std::string FileNamePre = "Board0_ADC_";
std::string FileIdx = std::to_string(fileindex);
std::string FileNameSur = ".bin";
std::string FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
int DataFileNum = 1;
FILE *fp = NULL;
int readbuf[TL*CHANNUM+3];
int CounterA = FRAMELEN;
int temp = 0;
bool foundpulse = false;
//QueryPerformanceFrequency(&nFreq);
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
//QueryPerformanceCounter(&nBeginTime);
//read one data packet at a time: 17 samples * 16 channels, 24-bit integers stored as int
for(int ii=0;ii<DataFileNum;ii++)
{
fileindex = ii;
FileIdx = std::to_string(fileindex);
FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
if(fp != NULL)
{
fclose(fp);
fp = NULL;
}
fp = fopen(FileName.c_str(),"rb");
int num=0;
for(int jj=0;jj<8e4;jj++)//
{
usleep(TL*1e6 / FS);//wait
fread(readbuf,sizeof(int),TL*CHANNUM+3,fp);
//search for the pulse leading edge
if(!foundpulse)
{
float dataval = 0.0;
for(int kk=0;kk<TL;kk++)
{
temp = readbuf[3+kk*CHANNUM+1];//channel 2
temp = temp<<8;
temp = temp>>8;
dataval = temp*1.0/pow(2.0,23) * 2.5;
if(fabs(dataval) > 0.5)
{
foundpulse = true;
break;
}
}
}
if(foundpulse && num++ > DIRECTARRIVENUM) //skip the direct arrival: 510 samples, 510/17 = 30 packets
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbuf+3,TL*CHANNUM*sizeof(int));
CounterA = CounterA-TL;
if(CounterA == 0)
{
//signal the data-ready event
pthread_mutex_lock(&count_lock_Board0DataReady);
pthread_cond_signal(&cond_Board0DataReady);
count_Board0DataReady = count_Board0DataReady+1;
pthread_mutex_unlock(&count_lock_Board0DataReady);
foundpulse = false;
CounterA = FRAMELEN;
num=0;
printf("readboard0data.\n");
}
}
}
}
return NULL;
}
void *ActiveDataFormatting(void *lParam)
{
int temp = 0;
// int index=0;
// FILE *fp=NULL;
if(ChannDataBufA != NULL)
{
free(ChannDataBufA);
ChannDataBufA = NULL;
}
ChannDataBufA = (float *)malloc(FRAMELEN*CHANNUM*sizeof(float));
memset(ChannDataBufA,0,FRAMELEN*CHANNUM*sizeof(float));
if(ChannDataBuf != NULL)
{
free(ChannDataBuf);
ChannDataBuf = NULL;
}
ChannDataBuf = (float *)malloc(FRAMELEN*ELENUM*sizeof(float));
memset(ChannDataBuf,0,FRAMELEN*ELENUM*sizeof(float));
while(1)
{
pthread_mutex_lock(&count_lock_Board0DataReady);
while (count_Board0DataReady == 0)
{
pthread_cond_wait(&cond_Board0DataReady,&count_lock_Board0DataReady);
}
count_Board0DataReady = count_Board0DataReady -1;
pthread_mutex_unlock(&count_lock_Board0DataReady);
printf("dataformatting begin\n");
for(int ii=0;ii<CHANNUM;ii++)
{
for(int jj=0;jj<FRAMELEN;jj++)
{
temp = DataBufA_B1[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
if(ii==1 || ii==6 || ii==7 || ii==9 || ii==11)
{
ChannDataBufA[ii*FRAMELEN+jj] = -temp*1.0/pow(2.0,23) * 2.5;
}
else
{
ChannDataBufA[ii*FRAMELEN+jj] = temp*1.0/pow(2.0,23) * 2.5;
}
}
}
//drop the 4 unused channels
memcpy(ChannDataBuf,ChannDataBufA,sizeof(float)*ELENUM*FRAMELEN);
pthread_mutex_lock(&count_lock_ActiveFrameDataReady);
pthread_cond_signal(&cond_ActiveFrameDataReady);
count_ActiveFrameDataReady = count_ActiveFrameDataReady+1;
pthread_mutex_unlock(&count_lock_ActiveFrameDataReady);
// std::string fname="/home/ubuntu/Documents/Active/tmp/formdata"+std::to_string(index++)+".bin";
// fp=fopen(fname.c_str(),"wb");
// fwrite(ChannDataBuf,sizeof(float),12*FRAMELEN,fp);
// fclose(fp);
// fp=NULL;
printf("dataformatting end\n");
}
}
void InitProcessing(){
//-----------------------------------Init();-------------------------------
//time-reversed reference signal: frequency-domain matched filtering
for(int ii=0;ii<NREF;ii++)
{
float t=1.0*ii/FS;
rsRef[NREF-1-ii]=sin(2*PI*(90e3*t+0.5e7*t*t));
}
//element coordinates
for(int jj=0;jj<6;jj++)
{
xEle[jj]=23e-3*sin(jj*PI/3);
zEle[jj]=23e-3*cos(jj*PI/3);
}
for(int jj=0;jj<6;jj++)
{
xEle[6+jj]=11.5e-3*sin(jj*PI/3);
zEle[6+jj]=11.5e-3*cos(jj*PI/3);
}
//time delays
for(int ii=0;ii<3;ii++)
{
for(int jj=0;jj<9;jj++)
{
for (int kk=0;kk<12;kk++)
{
dTime[(ii*9+jj)*12+kk] = (xEle[kk]*sin(theta[ii])*cos(phi[jj])+zEle[kk]*cos(theta[ii]))/UWC*FS;
}
}
}
//---------------------------------------------------Init finished-----------------------------
}
void *ActiveSignalProcessing(void *lParam)
{
float temp[27*FRAMELEN]={0.0};
InitProcessing();
int FrameNum = 0;
int fIndex=0;
FILE * wfp=NULL;
std::string FileName="";
//-----------------filtering parameters-------------------------------
float h[FIRORDER+1] = {0.0};
float fl = 80e3f,fh = 120e3f;
cudaError cudaStatus;
cufftReal *dev_x=NULL; //raw data for the 12 channels
cufftReal *dev_x_s=NULL; //single-channel raw data (zero-padded later)
cufftReal *dev_h=NULL; //filter coefficients
cufftComplex *dev_fft_x=NULL; //FFT of the 12-channel raw data
cufftComplex *dev_fft_h=NULL; //FFT of the filter coefficients
cufftComplex *dev_fft_y=NULL; //FFT of the filter output
cufftReal *dev_y=NULL; //filter output, time domain at the original sampling rate
cufftReal *dev_chanbuff=NULL; //data buffer in device memory
//float *FilteredDataout = NULL;
// float *DownSamplingData = NULL;
cufftHandle Hplan; //FFT of the filter coefficients
cufftHandle Xplan; //FFT of the channel raw data
cufftHandle Yplan; //FFT of the filtered channel data
cufftHandle HXplan; //FFT of the Hilbert input data
cufftHandle HYplan; //FFT of the Hilbert-filtered data
cufftHandle MXplan;
cufftHandle MHplan; //matched-filter coefficients
cufftHandle MYplan;
//_Longlong FiltDataFileIndex=0;
//----------------------------------------------------------------
//-----------------beamforming parameters-------------------------------
cufftReal *dev_dTime=NULL;
cufftReal *dev_mat=NULL;//matched-filter coefficients
cufftComplex *dev_fft_mat=NULL;//matched-filter frequency response
float *beamdata=NULL;
beamdata= (float *)malloc(FRAMELEN*PHINUM*THETANUM*sizeof(float));
memset(beamdata,0,FRAMELEN*PHINUM*THETANUM*sizeof(float));
cufftReal *dev_delayfilterbuf=NULL; //integer-delayed data for all channels
cufftReal *dev_delayfilterout=NULL; //fine-delayed data for all channels
cufftReal *dev_delayFilter=NULL; //delay filter coefficients
int *dev_dI=NULL; //integer delays for all channels
cufftReal *dev_dF=NULL; //fractional delays for all channels
cufftReal *dev_delaydata=NULL; //beamforming output
cufftReal *dev_matchdata=NULL; //matched-filter output
cufftReal *dev_beamdata=NULL;
cufftComplex *dev_fft_delaydata=NULL; //matched-filter input spectrum
cufftComplex *dev_fft_matout=NULL; //matched-filter output spectrum
int *dev_peak = NULL; //peak positions in each beam
int *dev_valley = NULL; //valley positions in each beam
cufftReal *dev_preselected = NULL; //pre-selected peaks
cufftReal *dev_selected = NULL; //selected peaks
int *peak = NULL;
int *valley = NULL;
//-----------------Hilbert transform parameters-------------------------------
cufftComplex *dev_matchdatafreq=NULL; //signal spectrum
cudaStatus = cudaMalloc((void **)&dev_matchdatafreq, sizeof(cufftComplex)*NHILBT);
cudaMemset((void **)&dev_matchdatafreq,0,sizeof(cufftComplex)*NHILBT);
cufftComplex *dev_hilboutfreq=NULL; //output spectrum
cudaStatus = cudaMalloc((void **)&dev_hilboutfreq, sizeof(cufftComplex)*NHILBT);
cudaMemset((void **)&dev_hilboutfreq,0,sizeof(cufftComplex)*NHILBT);
cufftReal *dev_hilbout=NULL; //输出信号
cudaStatus = cudaMalloc((void **)&dev_hilbout, sizeof(cufftReal)*FRAMELEN);
cudaMemset((void **)&dev_hilbout,0,sizeof(cufftReal)*FRAMELEN);
cufftReal *dev_envelopedata=NULL;//包络
cudaStatus = cudaMalloc((void **)&dev_envelopedata, sizeof(cufftReal)*FRAMELEN);
cudaMemset((void **)&dev_envelopedata,0,sizeof(cufftReal)*FRAMELEN);
//----------------------------------------------------------------
//-----------------Debug: memory allocation-----------------------------------
cufftPlan1d(&Hplan, NFIR, CUFFT_R2C, 1);
cufftPlan1d(&Xplan, NFIR, CUFFT_R2C, 1);
cufftPlan1d(&Yplan, NFIR, CUFFT_C2R, 1);
cufftPlan1d(&HXplan, NHILBT, CUFFT_R2C, 1);
cufftPlan1d(&HYplan, FRAMELEN, CUFFT_C2R, 1);
cufftPlan1d(&MXplan, NMAT, CUFFT_R2C, 1);
cufftPlan1d(&MHplan, NMAT, CUFFT_R2C, 1);
cufftPlan1d(&MYplan, NMAT, CUFFT_C2R, 1);
cudaStatus = cudaMalloc((void **)&dev_dTime, sizeof(cufftReal)*(PHINUM*THETANUM*ELENUM));//将延时向量写入显存
if (cudaStatus != cudaSuccess)
{
printf (" dev_dTime cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_dTime,0,sizeof(cufftReal)*(PHINUM*THETANUM*ELENUM));
cudaMemcpy(dev_dTime,dTime,sizeof(cufftReal)*(PHINUM*THETANUM*ELENUM),cudaMemcpyHostToDevice);
cudaStatus = cudaMalloc((void **)&dev_mat, sizeof(cufftReal)*NMAT);//将参考信号写入显存:频域
if (cudaStatus != cudaSuccess)
{
printf (" dev_mat cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_mat,0,sizeof(cufftReal)*NMAT);
cudaMemcpy(dev_mat+NMAT-NREF,rsRef,sizeof(cufftReal)*NREF,cudaMemcpyHostToDevice);//对其尾部
cudaStatus = cudaMalloc((void **)&dev_fft_mat, sizeof(cufftComplex)*NMAT);//匹配滤波参数频谱
if (cudaStatus != cudaSuccess)
{
printf (" dev_fft_mat cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_fft_mat,0,sizeof(cufftComplex)*NMAT);
cufftExecR2C(MHplan,(cufftReal *)&dev_mat[0],(cufftComplex *)&dev_fft_mat[0]);
cudaStatus = cudaMalloc((void **)&dev_fft_matout, sizeof(cufftComplex)*NMAT);//匹配滤波输出频谱
if (cudaStatus != cudaSuccess)
{
printf (" dev_fft_matout cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_fft_matout,0,sizeof(cufftComplex)*NMAT);
cudaStatus = cudaMalloc((void **)&dev_x, sizeof(cufftReal)*(FRAMELEN*ELENUM));//给原始数据分配显存
if (cudaStatus != cudaSuccess)
{
printf (" dev_x cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_x,0,sizeof(cufftReal)*FRAMELEN*ELENUM);
cudaStatus = cudaMalloc((void **)&dev_x_s, sizeof(cufftReal)*NFIR);//给单个通道原始数据分配显存
if (cudaStatus != cudaSuccess)
{
printf (" dev_x_s cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_x_s,0,sizeof(cufftReal)*NFIR);
cudaStatus = cudaMalloc((void **)&dev_h, sizeof(cufftReal)*NFIR);//给滤波器参数分配显存
if (cudaStatus != cudaSuccess)
{
printf ("dev_h cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_h,0,sizeof(cufftReal)*NFIR);
cudaStatus = cudaMalloc((void **)&dev_y, sizeof(cufftReal)*NFIR*ELENUM);//给滤波器输出数据分配显存
if (cudaStatus != cudaSuccess)
{
printf ("dev_y cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_y,0,sizeof(cufftReal)*NFIR*ELENUM);
cudaStatus = cudaMalloc((void **)&dev_fft_x,sizeof(cufftComplex)*NFIR*ELENUM);//给原始信号频域分配显存
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft_x cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_fft_x,0,sizeof(cufftComplex)*NFIR*ELENUM);
cudaStatus = cudaMalloc((void **)&dev_fft_h,sizeof(cufftComplex)*NFIR);//给滤波器频域分配显存
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft_h cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_fft_h,0,sizeof(cufftComplex)*NFIR);
cudaStatus = cudaMalloc((void **)&dev_fft_y,sizeof(cufftComplex)*(ELENUM*NFIR));//给输出数据频域分配显存
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft_y cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_fft_y,0,sizeof(cufftComplex)*(ELENUM*NFIR));
cudaStatus = cudaMalloc((void **)&dev_chanbuff,sizeof(cufftReal)*FILTER_FRAME*ELENUM);//给通道缓存数据分配显存
if (cudaStatus != cudaSuccess)
{
printf ("dev_chanbuff cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_chanbuff,0,sizeof(cufftReal)*FILTER_FRAME*ELENUM);
fir1(FIRORDER,3,fl,fh,FS,5,h);
cudaMemcpy(dev_h,h,sizeof(cufftReal)*FIRORDER,cudaMemcpyHostToDevice);
cufftExecR2C(Hplan,(cufftReal *)&dev_h[0],(cufftComplex *)&dev_fft_h[0]);//得到滤波器频域响应dev_fft_h
//---------------------------------------------------------------
cudaStatus =cudaMalloc((void **)&dev_delayfilterbuf,sizeof(cufftReal)*FRAMELEN*ELENUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_delayfilterbuf cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_delayfilterbuf,0,sizeof(cufftReal)*FRAMELEN*ELENUM);
cudaStatus =cudaMalloc((void **)&dev_delayfilterout,sizeof(cufftReal)*FRAMELEN*ELENUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_delayfilterout cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_delayfilterout,0,sizeof(cufftReal)*FRAMELEN*ELENUM);
cudaStatus =cudaMalloc((void **)&dev_delayFilter,sizeof(cufftReal)*(2*M+1)*ELENUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_delayFilter cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_delayFilter,0,sizeof(cufftReal)*(2*M+1)*ELENUM);
cudaStatus =cudaMalloc((void **)&dev_dI,sizeof(int)*ELENUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_dI cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_dI,0,sizeof(int)*ELENUM);
cudaStatus =cudaMalloc((void **)&dev_dF,sizeof(cufftReal)*ELENUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_dF cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_dF,0,sizeof(cufftReal)*ELENUM);
cudaStatus =cudaMalloc((void **)&dev_delaydata,sizeof(cufftReal)*FRAMELEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_delaydata cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_delaydata,0,sizeof(cufftReal)*FRAMELEN);
cudaStatus =cudaMalloc((void **)&dev_matchdata,sizeof(cufftReal)*NMAT);
if (cudaStatus != cudaSuccess)
{
printf ("dev_matchdata cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_matchdata,0,sizeof(cufftReal)*NMAT);
cudaStatus =cudaMalloc((void **)&dev_beamdata,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_beamdata cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_beamdata,0,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = cudaMalloc((void **)&dev_fft_delaydata,sizeof(cufftComplex)*NMAT);//给原始信号频域分配显存
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft_delaydata cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_fft_delaydata,0,sizeof(cufftComplex)*NMAT);
cudaStatus = cudaMalloc((void **)&dev_peak,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_peak cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_peak,0,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = cudaMalloc((void **)&dev_valley,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_valley cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_valley,0,sizeof(int)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = cudaMalloc((void **)&dev_preselected,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_preselected cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_preselected,0,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN);
cudaStatus = cudaMalloc((void **)&dev_selected,sizeof(cufftReal)*THETANUM*PHINUM*4);
if (cudaStatus != cudaSuccess)
{
printf ("dev_selected cudaMalloc Error! \n ");
}
cudaMemset((void **)&dev_selected,0,sizeof(cufftReal)*THETANUM*PHINUM*4);
peak = (int *)malloc(THETANUM*PHINUM*FRAMELEN*sizeof(int));
memset(peak,0,THETANUM*PHINUM*FRAMELEN*sizeof(int));
valley = (int *)malloc(THETANUM*PHINUM*FRAMELEN*sizeof(int));
memset(valley,0,THETANUM*PHINUM*FRAMELEN*sizeof(int));
//-----------------Debug: filter memory allocation finished-----------------------------------
//--------------------------Timing variables----------------------------
cudaEvent_t start1;
cudaEvent_t stop1;
float msecTotal = 0.0f;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
//----------------------------------------------------------------
while(1)
{
FileName="/home/ubuntu/Documents/Active/tmp/beamdata"+std::to_string(fIndex++)+".bin";
wfp=fopen(FileName.c_str(),"wb");
cudaEventRecord(start1,NULL);
printf("wait for process\n");
pthread_mutex_lock(&count_lock_ActiveFrameDataReady);
while (count_ActiveFrameDataReady == 0)
{
pthread_cond_wait(&cond_ActiveFrameDataReady,&count_lock_ActiveFrameDataReady);
}
count_ActiveFrameDataReady = count_ActiveFrameDataReady -1;
pthread_mutex_unlock(&count_lock_ActiveFrameDataReady);
FrameNum++;
cudaMemcpy(dev_x,ChannDataBuf,sizeof(cufftReal)*FRAMELEN*ELENUM,cudaMemcpyHostToDevice);// copy ChannDataBuf into dev_x
//-----------------------------------------(1) Signal filtering (2.4 ms)---------------------------------------------------
//
for(int jj=0;jj<ELENUM;jj++)
{
cudaMemcpy(dev_x_s,dev_x+FRAMELEN*jj,sizeof(cufftReal)*FRAMELEN,cudaMemcpyDeviceToDevice);// move channel jj of dev_x into dev_x_s
cufftExecR2C(Xplan,(cufftReal *)&dev_x_s[0],(cufftComplex *)&dev_fft_x[jj*NFIR]);// FFT of the raw signal
}
//
// multiply in the frequency domain
ActiveFilter<<<ELENUM*NFIR/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_fft_x,dev_fft_h,dev_fft_y,NFIR);
// inverse transform
for(int jj=0;jj<ELENUM;jj++)
{
cufftExecC2R(Yplan,dev_fft_y+jj*NFIR,dev_y+jj*NFIR);
cudaMemcpy((float*)&dev_chanbuff[jj*FILTER_FRAME],(cufftReal*)&dev_y[jj*NFIR+FIRORDER/2],sizeof(float)*FILTER_FRAME,cudaMemcpyDeviceToDevice);
}
//QueryPerformanceCounter(&nEndTime);
//cudaEventRecord(stop1,NULL);
//cudaEventSynchronize(stop1);
//-----------------------------------------(1) Signal filtering finished---------------------------------------------------
//-----------------------------------------(2) Beamforming (223 ms)---------------------------------------------------
//cudaEventRecord(start1,NULL);
// compute the delayed signals
for(int ii=0;ii<THETANUM;ii++)
{// elevation angle
for(int jj=0;jj<PHINUM;jj++)
{// azimuth angle
int index=(ii*9+jj)*12;// delay index
ActiveDelayFilterGen<<<ELENUM,2*M+1>>>(dev_delayFilter,dev_dI,dev_dF,dev_dTime,index);
for (int kk=0;kk<12;kk++)
{
int DI=(int)dTime[index+kk];// integer delay
float DF=dTime[index+kk];
if(DI>=0)
{
cudaMemcpy(dev_delayfilterbuf+kk*FRAMELEN+DI,dev_chanbuff+kk*FRAMELEN,sizeof(cufftReal)*(FRAMELEN-DI),cudaMemcpyDeviceToDevice);
}
else
{
cudaMemcpy(dev_delayfilterbuf+kk*FRAMELEN,dev_chanbuff+kk*FRAMELEN-DI,sizeof(cufftReal)*(FRAMELEN+DI),cudaMemcpyDeviceToDevice);
}
if(DF > 0.0001)
{
ActiveFineDelayFilter<<<FRAMELEN,2*M+1>>>(dev_delayfilterbuf+kk*FRAMELEN,dev_delayfilterout+kk*FRAMELEN,dev_delayFilter+kk*(2*M+1));
//cudaMemcpy(dev_delayfilterout+kk*FRAMELEN,dev_delayfilterbuf+kk*FRAMELEN,sizeof(cufftReal)*FRAMELEN,cudaMemcpyDeviceToDevice);
}
else
{
cudaMemcpy(dev_delayfilterout+kk*FRAMELEN,dev_delayfilterbuf+kk*FRAMELEN,sizeof(cufftReal)*FRAMELEN,cudaMemcpyDeviceToDevice);
}
}
MatrixSumRow<<<FRAMELEN,1>>>(dev_delayfilterout,dev_delaydata,ELENUM,FRAMELEN);
// matched filtering
//============== frequency-domain matched filtering ============================
cufftExecR2C(MXplan,dev_delaydata,dev_fft_delaydata);
VectorMultiplier<<<NMAT,1>>>(dev_fft_delaydata,dev_fft_mat,dev_fft_matout);
cufftExecC2R(MYplan,dev_fft_matout,dev_matchdata);
// Hilbert transform to take the envelope (0.5 ms)
// 1. FFT of the input signal: dev_matchdata -> dev_matchdatafreq
cufftExecR2C(HXplan,dev_matchdata,dev_matchdatafreq);
// 2. frequency-domain transform: dev_matchdatafreq -> dev_hilboutfreq
HilbFilt<<<NHILBT,1>>>(dev_hilboutfreq, dev_matchdatafreq, NHILBT/2);
// 3. inverse FFT: dev_hilboutfreq -> dev_hilbout
cufftExecC2R(HYplan,dev_hilboutfreq,dev_hilbout);
// 4. compute the envelope: dev_matchdata, dev_hilbout -> dev_envelopedata
Envelope<<<FRAMELEN,1>>>( dev_envelopedata,dev_matchdata, dev_hilbout);
// store the result into dev_beamdata
cudaMemcpy((cufftReal *)&dev_beamdata[(ii*9+jj)*FRAMELEN],dev_envelopedata,sizeof(cufftReal)*FRAMELEN,cudaMemcpyDeviceToDevice);
}
}
cudaMemset((void **)&dev_peak,0,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN);
cudaMemset((void **)&dev_valley,0,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN);
DevFindPeak<<<PHINUM*THETANUM,1>>>(dev_beamdata,dev_peak,FRAMELEN);
cudaMemcpy(peak,dev_peak,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN,cudaMemcpyDeviceToHost);
DevFindValley<<<PHINUM*THETANUM,1>>>(dev_beamdata,dev_valley,FRAMELEN);
cudaMemcpy(valley,dev_valley,sizeof(cufftReal)*THETANUM*PHINUM*FRAMELEN,cudaMemcpyDeviceToHost);
DevPeakDetection<<<PHINUM*THETANUM,1>>>(dev_peak,dev_valley,dev_beamdata,dev_preselected,dev_selected,FRAMELEN,3.0,10000);
cudaEventRecord(stop1,NULL);
cudaEventSynchronize(stop1);
cudaMemcpy(beamdata,dev_beamdata,sizeof(cufftReal)*6800*27,cudaMemcpyDeviceToHost);//THETANUM*PHINUM*FRAMELEN
cudaEventElapsedTime(&msecTotal,start1,stop1);
cudaMemcpy(temp,dev_beamdata,sizeof(cufftReal)*6800*27,cudaMemcpyDeviceToHost);
fwrite(temp,sizeof(float),6800*27,wfp);
//printf("%d:%.3f:\n",FrameNum,msecTotal);
printf("%d:%.3f:\n",FrameNum,msecTotal);
printf("processing finished.\n");
fclose(wfp);
wfp=NULL;
//-----------------------------------------(2) Beamforming finished-----------------------------------------------
}
}
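// --- Illustrative sketch (not part of the original source) ------------------
// The four numbered steps inside the beam loop build the quadrature component
// of the matched-filter output and combine it with the in-phase signal to get
// the envelope. HilbFilt and Envelope are defined elsewhere in this project;
// the conventional kernels for those two steps would look roughly like the
// sketches below (the bodies and the "Sketch"-suffixed names are ours, and
// cuFFT's un-normalised C2R scaling is ignored here):
__global__ void HilbFiltSketch(cufftComplex *out, const cufftComplex *in, int half)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k == 0 || k == half) { // DC and Nyquist carry no quadrature component
out[k].x = 0.0f;
out[k].y = 0.0f;
} else { // multiply positive frequencies by -j: a 90-degree phase shift
out[k].x = in[k].y;
out[k].y = -in[k].x;
}
}
__global__ void EnvelopeSketch(cufftReal *env, const cufftReal *re, const cufftReal *im)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
env[n] = sqrtf(re[n] * re[n] + im[n] * im[n]); // |in-phase + j*quadrature|
}
//------------------------------------------------------------------------------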
void *ActiveReceiveNetwork(void *lParam)
{
char errBuf[PCAP_ERRBUF_SIZE], *device;
pcap_t *handle;
bpf_u_int32 mask;
bpf_u_int32 net;
struct bpf_program filter;
char filter_app[] = "udp dst port 0"; //setting the filter package
struct pcap_pkthdr packet;
const u_char *pktStr;
char packtype = 0;
short portnumber = 0;
char sourceid = 0;
char FramenumN1 = -1, FramenumN2 = -1;
char LastFramenumN1 = 0, LastFramenumN2 = 0;
int readbufb1[TL*CHANNUM+1],readbufb2[TL*CHANNUM+1];
int BUF_FLAG_B1=0,BUF_FLAG_B2;
int *pBuf_B1 = NULL,*pBuf_B2 = NULL;
int *pCounter_B1 = NULL,*pCounter_B2 = NULL;
int CounterA_B1 = FRAMELEN;
int CounterA=FRAMELEN;
int temp = 0;
int FrameNum1 = 0,FrameNum2 = 0, FrameNum = 0;
bool foundpulse = false;
int num=0;
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
//get the name of the first device suitable for capture
device = pcap_lookupdev(errBuf);
if ( device )
{
printf("success: device: %s\n",device);
}
else
{
printf("error: %s\n",errBuf);
return 0;
}
//open network device for packet capture
handle = pcap_open_live(device,BUFSIZ,1,0,errBuf);
//look up into from the capture device
pcap_lookupnet(device,&net,&mask,errBuf);
printf("net=%x mask=%x\n",net,mask);
//compiles the filter expression into a bpf filter program
printf("compiles the filter expression into a bpf filter program\r\n");
pcap_compile(handle,&filter,filter_app,0,net);
//load the filter program into the packet capture device
printf("load the filter program into the packet capture device\r\n");
pcap_setfilter(handle,&filter);
while (1)
{
//printf("before Received data!\n");
pktStr = pcap_next(handle,&packet);
//printf("Received data!\n");
if(pktStr != NULL)
{
//printf("Received data!\n");
// read the destination port number
memcpy((char *)&portnumber,pktStr+37,sizeof(char));
memcpy((char *)&portnumber+1,pktStr+36,sizeof(char));
if (portnumber == DEST_PORT)
{
// read the packet type
memcpy(&packtype,pktStr+45,sizeof(char));
memcpy(&sourceid,pktStr+43,sizeof(char));
if (packtype == 0x10) // if packet is ADC packet
{
if(sourceid == 0)
{
FrameNum1++;
memcpy(readbufb1,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
FramenumN1 = *(pktStr+44);
FramenumN1 = FramenumN1 >> 2;
if (FrameNum1 == 1)
{
LastFramenumN1 = FramenumN1;
}
else
{
if (FramenumN1 != LastFramenumN1+1 && FramenumN1+63 != LastFramenumN1)
{
printf("Lost Board1 data package!\n");
}
LastFramenumN1 = FramenumN1;
}
}
// if(sourceid == 2)
// {
// FrameNum2++;
// memcpy(readbufb2,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
// FramenumN2 = *(pktStr+44);
// FramenumN2 = FramenumN2 >> 2;
// if (FrameNum2 == 1)
// {
// LastFramenumN2 = FramenumN2;
// }
// else
// {
// if (FramenumN2 != LastFramenumN2+1 && FramenumN2+63 != LastFramenumN2)
// {
// printf("Lost Board2 data package!\n");
// }
// LastFramenumN2 = FramenumN2;
// }
// }
// search for the leading edge of the pulse
if(!foundpulse)
{
float dataval = 0.0;
for(int kk=0;kk<TL;kk++)
{
temp = readbufb1[3+kk*CHANNUM+1];// channel 2
temp = temp<<8;
temp = temp>>8;
dataval = temp*1.0/pow(2.0,23) * 2.5;
if(fabs(dataval) > 0.5)
{
foundpulse = true;
break;
}
}
}
if(foundpulse && num++ > DIRECTARRIVENUM) //去掉直达波后510个点 510/17=30
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbufb1+3,TL*CHANNUM*sizeof(int));
CounterA = CounterA-TL;
if(CounterA == 0)
{
// signal the data-ready event
pthread_mutex_lock(&count_lock_Board0DataReady);
pthread_cond_signal(&cond_Board0DataReady);
count_Board0DataReady = count_Board0DataReady+1;
pthread_mutex_unlock(&count_lock_Board0DataReady);
foundpulse = false;
CounterA = FRAMELEN;
num=0;
}
}
}
}
}
//printf("ReceiveNetworkData Finished!\n");
//pthread_mutex_lock(&count_lock_BoardDataReady);
//pthread_cond_signal(&cond_BoardDataReady);
//count_BoardDataReady = count_BoardDataReady+1;
//pthread_mutex_unlock(&count_lock_BoardDataReady);
}
}
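// --- Illustrative sketch (not part of the original source) ------------------
// The capture loop above declares a pulse "found" as soon as one sample of
// channel 2 in the current UDP payload exceeds 0.5 V, and then discards the
// next DIRECTARRIVENUM payloads so the direct arrival is not beamformed.
// The same edge test factored out for clarity; the helper name is ours, and
// "payload" is assumed to point just past the 3-word packet header
// (i.e. readbufb1+3):
static bool PulseEdgeInPayload(const int *payload, float threshold)
{
for (int kk = 0; kk < TL; kk++) {
int temp = payload[kk*CHANNUM+1]; // channel 2 of sample kk
temp = temp << 8;
temp = temp >> 8; // sign-extend the 24-bit ADC word
float v = temp * 1.0f / 8388608.0f * 2.5f; // 2^23 counts per 2.5 V full scale
if (fabs(v) > threshold)
return true;
}
return false;
}
//------------------------------------------------------------------------------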
|
ba0079def9b633b4741fb1744c3196042cb22a38.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file np_bincount_op.cu
* \brief numpy compatible bincount operator GPU registration
*/
#include "./np_bincount_op-inl.h"
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include "../tensor/util/tensor_util-inl.cuh"
#include "../tensor/util/tensor_util-inl.h"
namespace mxnet {
namespace op {
struct BincountFusedKernel {
template <typename DType, typename OType>
static MSHADOW_XINLINE void Map(int i, const DType* data, OType* out) {
int idx = data[i];
atomicAdd(&out[idx], 1);
}
template <typename DType, typename OType>
static MSHADOW_XINLINE void Map(int i, const DType* data, const OType* weights, OType* out) {
int idx = data[i];
atomicAdd(&out[idx], weights[i]);
}
};
struct is_valid_check {
template <typename DType>
MSHADOW_XINLINE static void Map(int i, char* invalid_ptr, const DType* data) {
if (data[i] < 0)
*invalid_ptr = 1;
}
};
template <typename DType>
bool CheckInvalidInput(mshadow::Stream<gpu>* s,
const DType* data,
const size_t& data_size,
char* is_valid_ptr) {
using namespace mxnet_op;
int32_t is_valid = 0;
Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data);
CUDA_CALL(hipMemcpyAsync(&is_valid,
is_valid_ptr,
sizeof(char),
hipMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
return is_valid == 0;
}
template <>
void NumpyBincountForwardImpl<gpu>(const OpContext& ctx,
const NDArray& data,
const NDArray& weights,
const NDArray& out,
const size_t& data_n,
const int& minlength) {
using namespace mxnet_op;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
MXNET_NO_FLOAT16_TYPE_SWITCH(data.dtype(), DType, {
DType* h_ptr;
DType* d_ptr;
int bin = minlength;
d_ptr = data.data().dptr<DType>();
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckInvalidInput(s, d_ptr, data_n, is_valid_ptr);
CHECK(is_valid) << "Input should be nonnegative number"; // check invalid input
h_ptr = reinterpret_cast<DType*>(malloc(data_n * sizeof(DType)));
CUDA_CALL(hipMemcpyAsync(h_ptr,
d_ptr,
data_n * sizeof(DType),
hipMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
for (size_t i = 0; i < data_n; i++) {
if (h_ptr[i] + 1 > bin)
bin = h_ptr[i] + 1;
}
free(h_ptr);
mxnet::TShape s(1, bin);
const_cast<NDArray&>(out).Init(s); // set the output shape forcefully
});
MSHADOW_TYPE_SWITCH(data.dtype(), DType, {
MSHADOW_TYPE_SWITCH(weights.dtype(), OType, {
size_t out_size = out.shape().Size();
Kernel<set_zero, gpu>::Launch(s, out_size, out.data().dptr<OType>());
Kernel<BincountFusedKernel, gpu>::Launch(s,
data_n,
data.data().dptr<DType>(),
weights.data().dptr<OType>(),
out.data().dptr<OType>());
});
});
}
template <>
void NumpyBincountForwardImpl<gpu>(const OpContext& ctx,
const NDArray& data,
const NDArray& out,
const size_t& data_n,
const int& minlength) {
using namespace mxnet_op;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
MXNET_NO_FLOAT16_TYPE_SWITCH(data.dtype(), DType, {
DType* h_ptr;
DType* d_ptr;
int bin = minlength;
d_ptr = data.data().dptr<DType>();
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckInvalidInput(s, d_ptr, data_n, is_valid_ptr);
CHECK(is_valid) << "Input should be nonnegative number"; // check invalid input
h_ptr = reinterpret_cast<DType*>(malloc(data_n * sizeof(DType)));
CUDA_CALL(hipMemcpyAsync(h_ptr,
d_ptr,
data_n * sizeof(DType),
hipMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
for (size_t i = 0; i < data_n; i++) {
if (h_ptr[i] + 1 > bin)
bin = h_ptr[i] + 1;
}
free(h_ptr);
mxnet::TShape s(1, bin);
const_cast<NDArray&>(out).Init(s); // set the output shape forcefully
});
MSHADOW_TYPE_SWITCH(data.dtype(), DType, {
MSHADOW_TYPE_SWITCH(out.dtype(), OType, {
size_t out_size = out.shape().Size();
Kernel<set_zero, gpu>::Launch(s, out_size, out.data().dptr<OType>());
Kernel<BincountFusedKernel, gpu>::Launch(
s, data_n, data.data().dptr<DType>(), out.data().dptr<OType>());
});
});
}
NNVM_REGISTER_OP(_npi_bincount).set_attr<FComputeEx>("FComputeEx<gpu>", NumpyBincountForward<gpu>);
} // namespace op
} // namespace mxnet
|
ba0079def9b633b4741fb1744c3196042cb22a38.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file np_bincount_op.cu
* \brief numpy compatible bincount operator GPU registration
*/
#include "./np_bincount_op-inl.h"
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include "../tensor/util/tensor_util-inl.cuh"
#include "../tensor/util/tensor_util-inl.h"
namespace mxnet {
namespace op {
struct BincountFusedKernel {
template <typename DType, typename OType>
static MSHADOW_XINLINE void Map(int i, const DType* data, OType* out) {
int idx = data[i];
atomicAdd(&out[idx], 1);
}
template <typename DType, typename OType>
static MSHADOW_XINLINE void Map(int i, const DType* data, const OType* weights, OType* out) {
int idx = data[i];
atomicAdd(&out[idx], weights[i]);
}
};
struct is_valid_check {
template <typename DType>
MSHADOW_XINLINE static void Map(int i, char* invalid_ptr, const DType* data) {
if (data[i] < 0)
*invalid_ptr = 1;
}
};
template <typename DType>
bool CheckInvalidInput(mshadow::Stream<gpu>* s,
const DType* data,
const size_t& data_size,
char* is_valid_ptr) {
using namespace mxnet_op;
int32_t is_valid = 0;
Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data);
CUDA_CALL(cudaMemcpyAsync(&is_valid,
is_valid_ptr,
sizeof(char),
cudaMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
return is_valid == 0;
}
template <>
void NumpyBincountForwardImpl<gpu>(const OpContext& ctx,
const NDArray& data,
const NDArray& weights,
const NDArray& out,
const size_t& data_n,
const int& minlength) {
using namespace mxnet_op;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
MXNET_NO_FLOAT16_TYPE_SWITCH(data.dtype(), DType, {
DType* h_ptr;
DType* d_ptr;
int bin = minlength;
d_ptr = data.data().dptr<DType>();
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckInvalidInput(s, d_ptr, data_n, is_valid_ptr);
CHECK(is_valid) << "Input should be nonnegative number"; // check invalid input
h_ptr = reinterpret_cast<DType*>(malloc(data_n * sizeof(DType)));
CUDA_CALL(cudaMemcpyAsync(h_ptr,
d_ptr,
data_n * sizeof(DType),
cudaMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
for (size_t i = 0; i < data_n; i++) {
if (h_ptr[i] + 1 > bin)
bin = h_ptr[i] + 1;
}
free(h_ptr);
mxnet::TShape s(1, bin);
const_cast<NDArray&>(out).Init(s); // set the output shape forcefully
});
MSHADOW_TYPE_SWITCH(data.dtype(), DType, {
MSHADOW_TYPE_SWITCH(weights.dtype(), OType, {
size_t out_size = out.shape().Size();
Kernel<set_zero, gpu>::Launch(s, out_size, out.data().dptr<OType>());
Kernel<BincountFusedKernel, gpu>::Launch(s,
data_n,
data.data().dptr<DType>(),
weights.data().dptr<OType>(),
out.data().dptr<OType>());
});
});
}
template <>
void NumpyBincountForwardImpl<gpu>(const OpContext& ctx,
const NDArray& data,
const NDArray& out,
const size_t& data_n,
const int& minlength) {
using namespace mxnet_op;
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
MXNET_NO_FLOAT16_TYPE_SWITCH(data.dtype(), DType, {
DType* h_ptr;
DType* d_ptr;
int bin = minlength;
d_ptr = data.data().dptr<DType>();
Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
bool is_valid = CheckInvalidInput(s, d_ptr, data_n, is_valid_ptr);
CHECK(is_valid) << "Input should be nonnegative number"; // check invalid input
h_ptr = reinterpret_cast<DType*>(malloc(data_n * sizeof(DType)));
CUDA_CALL(cudaMemcpyAsync(h_ptr,
d_ptr,
data_n * sizeof(DType),
cudaMemcpyDeviceToHost,
mshadow::Stream<gpu>::GetStream(s)));
CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
for (size_t i = 0; i < data_n; i++) {
if (h_ptr[i] + 1 > bin)
bin = h_ptr[i] + 1;
}
free(h_ptr);
mxnet::TShape s(1, bin);
const_cast<NDArray&>(out).Init(s); // set the output shape forcefully
});
MSHADOW_TYPE_SWITCH(data.dtype(), DType, {
MSHADOW_TYPE_SWITCH(out.dtype(), OType, {
size_t out_size = out.shape().Size();
Kernel<set_zero, gpu>::Launch(s, out_size, out.data().dptr<OType>());
Kernel<BincountFusedKernel, gpu>::Launch(
s, data_n, data.data().dptr<DType>(), out.data().dptr<OType>());
});
});
}
NNVM_REGISTER_OP(_npi_bincount).set_attr<FComputeEx>("FComputeEx<gpu>", NumpyBincountForward<gpu>);
} // namespace op
} // namespace mxnet
|
194e65191dc5ff276f6fea2e0b7af65d20660332.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 0 // Updated for int4
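// --- Illustrative note (not part of the original source) --------------------
// The comment above describes the usual skew trick: padding every shared-memory
// row by SKEW elements so that consecutive rows start in different banks. With
// the int4 tile layout used below the padding is unnecessary, hence SKEW is 0,
// but the addressing it would affect looks like this (helper name is ours):
__device__ __forceinline__ int skewed_idx(int row, int col)
{
return row * (CHUNK_K + SKEW) + col; // row pitch includes the SKEW padding
}
//------------------------------------------------------------------------------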
#define checkKernelErrors(expr) \
do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
__global__ void apmm_w2a2(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) {
// GEMM configuration.
int K_TILES = K_GLOBAL / 128;
int W_bit_offset = M_GLOBAL*K_GLOBAL/128;
int X_bit_offset = N_GLOBAL*K_GLOBAL/128;
int ROW_BIT = K_GLOBAL/128;
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<M_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j));
// }
// }
// }
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int b=0; b<xb; b++) {
// for(int i=0; i<N_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j));
// }
// }
// }
// }
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = block_pos / (N_GLOBAL/32) * 32;
const unsigned int block_tile_j = block_pos % (N_GLOBAL/32) * 32;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_GLOBAL) {
break;
}
typedef union {
int4 vec;
int a[4];
} U4;
wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
for(int i=0; i < WARP_COL_TILES; i++)
for(int j = 0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const int4 *warp_ptr;
if (warpId < 2) {
warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 16 * ROW_BIT;
} else if (warpId < 4) {
warp_ptr = &W[block_tile_i * ROW_BIT + W_bit_offset] + (warpId-2) * 16 * ROW_BIT;
} else if (warpId < 6) {
warp_ptr = &X[block_tile_j * ROW_BIT] + (warpId -4) * 16 * ROW_BIT;
} else {
warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset] + (warpId-6)*16*ROW_BIT;
}
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4;
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
// int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) +
// (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) +
// (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit.
int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4;
*shmem_ptr = *lane_ptr;
shmem_ptr += 8*4*(CHUNK_K+SKEW);
lane_ptr += 8*ROW_BIT*4;
*shmem_ptr = *lane_ptr;
// U4 tmp_probe;
// tmp_probe.vec = *lane_ptr;
// printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]);
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<128; i++) {
// printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3));
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int t=0; t<a[i].num_elements; t++) {
// printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]);
// }
// printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step);
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 16 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT);
}
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=62; i<64; i++) {
// for(int j=0; j<64; j++) {
// printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j));
// }
// }
// }
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
// int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO.
size_t idx = threadIdx.x/8 * 64 + threadIdx.x%8*4;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
U4 tmp0;
U4 tmp1;
U4 tmp2;
U4 tmp3;
U4 val;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+8);
tmp2.vec = *((int4*)shmem_warp_stream_ptr+32*16);
tmp3.vec = *((int4*)shmem_warp_stream_ptr+8+32*16);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i = 0; i < 4; i++) {
// printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]);
// }
// printf("\n");
// for(int i = 0; i < 4; i++) {
// printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]);
// }
// printf("\n");
// }
val.a[0] = tmp0.a[0] + 2*tmp1.a[0] + 2*tmp2.a[0] + 4*tmp3.a[0];
val.a[1] = tmp0.a[1] + 2*tmp1.a[1] + 2*tmp2.a[1] + 4*tmp3.a[1];
val.a[2] = tmp0.a[2] + 2*tmp1.a[2] + 2*tmp2.a[2] + 4*tmp3.a[2];
val.a[3] = tmp0.a[3] + 2*tmp1.a[3] + 2*tmp2.a[3] + 4*tmp3.a[3];
// if (warpId == 7) {
// printf("warpId: %d, laneId: %d, idx: %d, val[%d].a: %d, %d, %d, %d, tmp0: %d %d %d %d, tmp1: %d %d %d %d \n", warpId, laneId, idx, i, val[i].a[0], val[i].a[1], val[i].a[2], val[i].a[3], tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3], tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3] );
// }
__syncthreads();
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + (threadIdx.x/8)*N_GLOBAL + (threadIdx.x%8)*4;
// printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx);
// Now that shared memory contains all the D tiles, stream them to global memory.
int *dst_gmem_warp_stream_ptr = &D[gmem_idx];
*((int4 *)(dst_gmem_warp_stream_ptr)) = val.vec;
__syncthreads();
}
}
void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){
int *W_int = (int*) W;
int *X_int = (int*) X;
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < M_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i;
W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
for(int b = 0; b<X_BIT; b++) {
for(int i = 0; i < N_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j;
X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
}
int popcnt(int i) {
// Java: use int, and use >>> instead of >>
// C or C++: use int
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
ref_C[m*N_GLOBAL+n]= tmp;
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL are multiples of 32.
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[M_GLOBAL*N_GLOBAL];
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
C_ref_before_decompose[m*K_GLOBAL+n]= tmp;
}
}
for(int m=0; m<M_GLOBAL; m++) {
for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) val[b] = 0;
for(int n=0; n<32; n++) {
int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n];
tmp = (tmp - 128); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | ((mask<<b) & tmp);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b];
}
}
}
}
void validate_results(int *C, int* ref_C, int M_, int N_) {
// Assume K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i < M_; i++) {
for(int j = 0; j < N_; j++) {
int idx = i*N_+j;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result with pack for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int m = 0; m < M_; m++) {
for(int n_tile = 0; n_tile < N_/32; n_tile++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*M_*N_/32 + m*N_/32+n_tile;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
#define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
int X_BIT = 2;
int W_BIT = 2;
for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) {
// int M_GLOBAL = 256;
int N_GLOBAL = M_GLOBAL;
int K_GLOBAL = M_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT));
checkCudaErrors(
hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
#ifdef verify_output
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT);
X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT);
Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
printf("Preparing validation data for GPU...\n");
init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, hipMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(hipFuncSetAttribute(
apmm_w2a2, hipFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 200;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
hipEvent_t bmma_start;
hipEvent_t bmma_end;
hipEventCreate(&bmma_start);
hipEventCreate(&bmma_end);
hipEventRecord(bmma_start);
checkKernelErrors(
hipLaunchKernelGGL(apmm_w2a2, dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK),
SHMEM_SZ, 0, W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT));
hipEventRecord(bmma_end);
hipEventSynchronize(bmma_end);
hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
hipEventDestroy(bmma_start);
hipEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V72, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
/* Compute reference matrix on CPU */
compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
/* validate results */
validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL);
free(W_h);
free(X_h);
free(Output_h);
free(Output_ref);
#endif
checkCudaErrors(hipFree(reinterpret_cast<void *>(W)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(X)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
|
194e65191dc5ff276f6fea2e0b7af65d20660332.cu
|
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <cuda.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 128
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#define CHUNK_K 1
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B
// matrix in shared memory to minimize possible bank conflicts. Before
// performing the nvcuda::wmma::mma_sync operation, the warp must load the
// matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the
// memory access pattern is not specified for that function, each lane in the
// warp can read one or multiple matrix elements from different matrix rows or
// columns. For shared memory, such access can result in bank conflicts if
// different rows / columns of the matrix map to the same bank. By shifting each
// row and column by a few bytes, we make sure that they map to different banks,
// thus reducing the number of possible bank conflicts. The number of 32
// one-byte "uint8_t" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by
// nvcuda::wmma::load_matrix_sync.
#define SKEW 0 // Updated for int4
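// --- Illustrative note (not part of the original source) --------------------
// The comment above describes the usual skew trick: padding every shared-memory
// row by SKEW elements so that consecutive rows start in different banks. With
// the int4 tile layout used below the padding is unnecessary, hence SKEW is 0,
// but the addressing it would affect looks like this (helper name is ours):
__device__ __forceinline__ int skewed_idx(int row, int col)
{
return row * (CHUNK_K + SKEW) + col; // row pitch includes the SKEW padding
}
//------------------------------------------------------------------------------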
#define checkKernelErrors(expr) \
do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
__global__ void apmm_w2a2(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) {
// GEMM configuration.
int K_TILES = K_GLOBAL / 128;
int W_bit_offset = M_GLOBAL*K_GLOBAL/128;
int X_bit_offset = N_GLOBAL*K_GLOBAL/128;
int ROW_BIT = K_GLOBAL/128;
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<M_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j));
// }
// }
// }
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int b=0; b<xb; b++) {
// for(int i=0; i<N_GLOBAL; i++) {
// for(int j=0; j<K_GLOBAL/32; j++) {
// printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j));
// }
// }
// }
// }
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = block_pos / (N_GLOBAL/32) * 32;
const unsigned int block_tile_j = block_pos % (N_GLOBAL/32) * 32;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_GLOBAL) {
break;
}
typedef union {
int4 vec;
int a[4];
} U4;
wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
for(int i=0; i < WARP_COL_TILES; i++)
for(int j = 0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const int4 *warp_ptr;
if (warpId < 2) {
warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 16 * ROW_BIT;
} else if (warpId < 4) {
warp_ptr = &W[block_tile_i * ROW_BIT + W_bit_offset] + (warpId-2) * 16 * ROW_BIT;
} else if (warpId < 6) {
warp_ptr = &X[block_tile_j * ROW_BIT] + (warpId -4) * 16 * ROW_BIT;
} else {
warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset] + (warpId-6)*16*ROW_BIT;
}
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop.
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy
// the B matrix.
int *shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4;
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
// int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) +
// (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) +
// (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit.
int *lane_ptr = (int*)warp_ptr + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4;
*shmem_ptr = *lane_ptr;
shmem_ptr += 8*4*(CHUNK_K+SKEW);
lane_ptr += 8*ROW_BIT*4;
*shmem_ptr = *lane_ptr;
// U4 tmp_probe;
// tmp_probe.vec = *lane_ptr;
// printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]);
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=0; i<128; i++) {
// printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3));
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int t=0; t<a[i].num_elements; t++) {
// printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]);
// }
// printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step);
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 16 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT);
}
}
__syncthreads();
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i=62; i<64; i++) {
// for(int j=0; j<64; j++) {
// printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j));
// }
// }
// }
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
// int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO.
size_t idx = threadIdx.x/8 * 64 + threadIdx.x%8*4;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx;
U4 tmp0;
U4 tmp1;
U4 tmp2;
U4 tmp3;
U4 val;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+8);
tmp2.vec = *((int4*)shmem_warp_stream_ptr+32*16);
tmp3.vec = *((int4*)shmem_warp_stream_ptr+8+32*16);
// if (warpId == 0 && laneId == 0 && blockIdx.x==0) {
// for(int i = 0; i < 4; i++) {
// printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]);
// }
// printf("\n");
// for(int i = 0; i < 4; i++) {
// printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]);
// }
// printf("\n");
// }
val.a[0] = tmp0.a[0] + 2*tmp1.a[0] + 2*tmp2.a[0] + 4*tmp3.a[0];
val.a[1] = tmp0.a[1] + 2*tmp1.a[1] + 2*tmp2.a[1] + 4*tmp3.a[1];
val.a[2] = tmp0.a[2] + 2*tmp1.a[2] + 2*tmp2.a[2] + 4*tmp3.a[2];
val.a[3] = tmp0.a[3] + 2*tmp1.a[3] + 2*tmp2.a[3] + 4*tmp3.a[3];
// if (warpId == 7) {
// printf("warpId: %d, laneId: %d, idx: %d, val[%d].a: %d, %d, %d, %d, tmp0: %d %d %d %d, tmp1: %d %d %d %d \n", warpId, laneId, idx, i, val[i].a[0], val[i].a[1], val[i].a[2], val[i].a[3], tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3], tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3] );
// }
__syncthreads();
// This warp's pointer to the C matrix data to copy memory from to shared memory.
// TODO: May be moved outside the for loop.
size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + (threadIdx.x/8)*N_GLOBAL + (threadIdx.x%8)*4;
// printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx);
// Now that shared memory contains all the D tiles, stream them to global memory.
int *dst_gmem_warp_stream_ptr = &D[gmem_idx];
*((int4 *)(dst_gmem_warp_stream_ptr)) = val.vec;
__syncthreads();
}
}
void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){
int *W_int = (int*) W;
int *X_int = (int*) X;
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < M_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i;
W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
for(int b = 0; b<X_BIT; b++) {
for(int i = 0; i < N_GLOBAL; i++) {
for(int j = 0; j < K_GLOBAL/32; j++) {
// X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF;
// X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j;
X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand();
}
}
}
}
int popcnt(int i) {
// Java: use int, and use >>> instead of >>
// C or C++: use int
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
ref_C[m*N_GLOBAL+n]= tmp;
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL are multiples of 32.
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[M_GLOBAL*N_GLOBAL];
for (int m = 0; m < M_GLOBAL; m++) {
for (int n = 0; n < N_GLOBAL; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) {
int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile];
int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
}
}
}
C_ref_before_decompose[m*N_GLOBAL+n] = tmp; // index with the output row stride N_GLOBAL, not K_GLOBAL
}
}
for(int m=0; m<M_GLOBAL; m++) {
for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) val[b] = 0;
for(int n=0; n<32; n++) {
int tmp = C_ref_before_decompose[m*N_GLOBAL+n_tile*32+n];
tmp = (tmp - 128); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | ((tmp >> b) & mask); // append bit b of tmp as the new LSB of the packed word
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*M_GLOBAL*N_GLOBAL/32 + m*N_GLOBAL/32 + n_tile] = val[b];
}
}
}
}
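// Element-wise check of the GPU output against the CPU reference. The error
// term divides the absolute difference by the product of the two magnitudes,
// so exact integer matches give zero and any significant mismatch is reported.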
void validate_results(int *C, int* ref_C, int M_, int N_) {
// Assume K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i < M_; i++) {
for(int j = 0; j < N_; j++) {
int idx = i*N_+j;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) {
// Assume K_GLOBAL and N_GLOBAL are multiples of 32.
printf("Checking computed result with pack for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int m = 0; m < M_; m++) {
for(int n_tile = 0; n_tile < N_/32; n_tile++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*M_*N_/32 + m*N_/32+n_tile;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
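// When defined, the host also runs the (slow) CPU reference and validates the GPU output.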
#define verify_output
int main(int argc, char **argv) {
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
int X_BIT = 2;
int W_BIT = 2;
for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) {
// int M_GLOBAL = 256;
int N_GLOBAL = M_GLOBAL;
int K_GLOBAL = M_GLOBAL;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT));
checkCudaErrors(
cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL));
#ifdef verify_output
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT);
X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT);
Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
printf("Preparing validation data for GPU...\n");
init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, cudaMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(cudaFuncSetAttribute(
apmm_w2a2, cudaFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 200;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
cudaEvent_t bmma_start;
cudaEvent_t bmma_end;
cudaEventCreate(&bmma_start);
cudaEventCreate(&bmma_end);
cudaEventRecord(bmma_start);
checkKernelErrors(
(apmm_w2a2<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK,
SHMEM_SZ>>>(W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT)));
cudaEventRecord(bmma_end);
cudaEventSynchronize(bmma_end);
cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
cudaEventDestroy(bmma_start);
cudaEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES;
printf("V72, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL);
/* Compute reference matrix on CPU */
compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT);
/* validation results */
validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL);
free(W_h);
free(X_h);
free(Output_h);
free(Output_ref);
#endif
checkCudaErrors(cudaFree(reinterpret_cast<void *>(W)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(X)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output)));
}
return EXIT_SUCCESS;
}
|
2d689e0e512bcaafcc3a3cbb758514963f14a950.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void SharedMem2globalMem(float * d_memoryA, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Copy shared memory to global memory (coalesced writes) in a grid-stride loop */
for(int i = iID; i < iNumElements; i += iNumThreads)
d_memoryA[i] = s_memoryA[i];
}
|
2d689e0e512bcaafcc3a3cbb758514963f14a950.cu
|
#include "includes.h"
__global__ void SharedMem2globalMem(float * d_memoryA, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Copy shared memory to global memory (coalesced writes) in a grid-stride loop */
for(int i = iID; i < iNumElements; i += iNumThreads)
d_memoryA[i] = s_memoryA[i];
}
|
d8c9e46c30ac14834cf7ea45b57f9d1eac980702.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Joao Goncalves is a MSc Student at the University of Coimbra, Portugal
Copyright (C) 2012 Joao Goncalves
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//this example implements a SVM using the GPU. Can be used to train and classify binary datasets
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
#include <assert.h>
#include <float.h>
#include <fstream>
#include <iostream>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
//GPUMLib stuff
#include "../../common/CudaDefinitions.h"
#include "../../common/Utilities.h"
#include "../../memory/DeviceArray.h"
#include "../../memory/DeviceMatrix.h"
#include "../../memory/HostArray.h"
#include "../../memory/HostMatrix.h"
#include <hip/hip_runtime.h>
//! Comment or set this macro to zero to disable some runtime debugging info
#define DEBUG 1
#include "../../SVM/Settings.h"
#include "../../SVM/svm_kernel_type.h"
//SVM encapsulating class
#include "../../SVM/SVM.h"
using namespace std;
namespace GPUMLib {
typedef unsigned int uint;
//! Value separator tag for the CSV files
#define VALUE_SEPARATOR ",;"
//! Size of blocks (in elements) for reading/writing operations
#define BUFFERING_BLOCK_SIZE (1<<24)
#ifndef BUFFERING_BLOCK_SIZE
#define BUFFERING_BLOCK_SIZE (1<<20)
#endif
/**
* helper function for validLine()
*/
bool validCharacter(char &c) {
if (c >= '0' && c <= '9')
return true;
if (c == ',' || c == '.' || c == ' ' || c == '\r' || c == '\n' || c == '-' || c == '+' || c == 'e' || c == 'E')
return true;
return false;
}
/**
* check if a line from the dataset is valid
* a line is valid if it only contains valid characters
*/
bool validLine(char * buf, int size) {
for (int i = 0; i < size; i++) {
char c = buf[i];
if (c == 0)
return true;
if (!validCharacter(c))
return false;
}
return true;
}
/**
* Counts the amount of samples (lines) in given comma separated file (CSV) file
* @param f The file to be used
* @return The number of samples in the file
*/
int getNumberOfSamples(FILE *f) {
//TODO: remove that dirty way using valid characters (because of e/E)
//start... from the beginning
fseek(f, 0, SEEK_SET);
//read
char * buf = new char[BUFFERING_BLOCK_SIZE];
int count = 0;
while (fgets(buf, BUFFERING_BLOCK_SIZE, f)) {
if (validLine(buf, BUFFERING_BLOCK_SIZE))
count++;
}
// if (DEBUG)
// cout << "Number of samples:\t" << count << endl;
delete[] buf; // buf was allocated with new[]
return count;
}
/**
* Counts the amount of lines (\n) in given file
* @param f The file to be used
* @return The number of lines in the file
*/
int getNumberOfLines(FILE *f) {
int count = 0;
//start from the beginning
fseek(f, 0, SEEK_SET);
//read
char * buf = new char[BUFFERING_BLOCK_SIZE];
for (;;) {
//read a nice chunk (to minimize head seek overhead)
size_t amount_read = fread(buf, sizeof(char), BUFFERING_BLOCK_SIZE, f);
if (amount_read == 0)
break;
//count occurrences of '\n' in that chunk
for (size_t i = 0; i < amount_read; i++) {
if (buf[i] == '\n')
count++;
}
}
delete[] buf; // buf was allocated with new[]
// if (DEBUG)
// cout << "Number of lines:\t" << count << endl;
return count;
}
/**
* Counts the amount of columns in first line of given CSV file.
* @param f The file to be used
* @return The number of columns in the file
*/
int getNumberOfColumns(FILE *f) {
//start from the beginning
fseek(f, 0, SEEK_SET);
//temporary storage
char * buf = new char[BUFFERING_BLOCK_SIZE];
//eat empty lines
bool gotvalidline = false;
while (!gotvalidline) {
fgets(buf, BUFFERING_BLOCK_SIZE, f);
if (buf[0] != '\n' && validLine(buf, BUFFERING_BLOCK_SIZE))
gotvalidline = true;
}
//eat first value
char* tok = strtok(buf, VALUE_SEPARATOR);
int num_columns = 1;
//count next values until the end of the line
while ((tok = strtok(NULL, VALUE_SEPARATOR)) != NULL)
num_columns++;
// if (DEBUG)
// cout << "Number of columns:\t" << num_columns << endl;
return num_columns;
}
/**
* Reads a CSV file as a dataset.
* @param f The file to be read
* @param samples The HostMatrix where to store the attributes/features for each sample/pattern
* @param classes The HostArray where to store the class of each sample/pattern
* @param ncols The number of columns in the CSV file (must be previously obtained)
* @param positive_class The value to be used to identify the positive class. The other values will be used as the negative class
*/
void readDataSet(FILE *f, GPUMLib::HostMatrix<cudafloat> & samples, GPUMLib::HostArray<int> & classes, int ncols, int positive_class) {
//start from the beginning
fseek(f, 0, SEEK_SET);
//read
char * buf = new char[BUFFERING_BLOCK_SIZE];
int row = 0;
int positives = 0;
int negatives = 0;
while (fgets(buf, BUFFERING_BLOCK_SIZE, f)) {
if (!validLine(buf, BUFFERING_BLOCK_SIZE))
continue;
//strrplchr(buf, ',', '.'); // replace , by .
//get first feature and convert to numeric
char *tok = strtok(buf, VALUE_SEPARATOR);
double val = strtod(tok, NULL); // atoi IS SLOWER!
samples(row, 0) = (float) val;
//do the same for the remaining features
for (int col = 1; col < ncols - 1; col++) {
tok = strtok(NULL, VALUE_SEPARATOR);
val = strtod(tok, NULL);
// store value
samples(row, col) = (float) val;
}
// get the class
tok = strtok(NULL, VALUE_SEPARATOR);
int c = strtol(tok, NULL, 10);
//we expect the class label to belong to {-1;1}
if (c == positive_class) {
classes[row] = 1;
positives++;
} else {
classes[row] = -1;
negatives++;
}
row++;
}
if (DEBUG) {
cout << "read dataset with " << row << " rows and " << ncols << " columns" << endl;
cout << "dataset with " << positives << " positives and " << negatives << " negatives" << endl;
cout << "positive to negative ratio is " << (double) positives / (double) negatives << endl;
}
}
//! Prints various classification metrics such as the Confusion Matrix, Accuracy, F-Score, etc.
//! \param targets A HostArray containing the target classes (real data)
//! \param predicted A HostArray containing the predicted classes (output of the classifier)
//! \param length The sample size
void showClassificationMetrics(GPUMLib::HostArray<int> &targets, GPUMLib::HostArray<int> &predicted, int length) {
//confusion matrix
int tp = 0;
int fp = 0;
int tn = 0;
int fn = 0;
int errors = 0;
//#pragma omp parallel for reduction(+:errors,tp,fp,tn,fn)
for (int i = 0; i < length; i++) {
//confusion matrix
if (predicted[i] == -1) {
if (targets[i] == -1) {
//TN
tn++;
} else {
//FN
fn++;
}
} else {
if (targets[i] == -1) {
//FP
fp++;
} else {
//TP
tp++;
}
}
int class_err = targets[i] - predicted[i];
if (class_err != 0)
errors++;
}
cout << "Confusion matrix:" << endl;
cout << "\t\t\tActual class" << endl;
cout << "\t\t\t-1\t1" << endl;
cout << "Predicted class\t-1\t" << tn << "\t" << fn << endl;
cout << "\t\t1\t" << fp << "\t" << tp << endl;
double precision = ((tp + fp) == 0 ? 0 : (double) (tp) / (double) (tp + fp));
cout << "Precision: " << precision << endl;
double recall = ((fn + tp) == 0 ? 0 : (double) (tp) / (double) (fn + tp));
cout << "Recall: " << recall << endl;
double false_positive_rate = ((fp + tn) == 0 ? 0 : (double) (fp) / (double) (fp + tn));
cout << "False Positive Rate: " << false_positive_rate << endl;
cout << "Specificity: " << 1.0 - false_positive_rate << endl;
cout << "False Discovery Rate: " << ((fp + tp) == 0?0:(double) (fp) / (double) (fp + tp)) << endl;
cout << "Accuracy: " << ((tp + tn + fp + fn) == 0?0:(double) (tp + tn) / (double) (tp + tn + fp + fn)) << endl;
cout << "F-score: " << ((recall + precision) <FLT_MIN?0:(2.0 * recall * precision) / (recall + precision)) << endl;
cout << "testing errors were " << errors << "/" << length << " = " << (double) errors / (double) length << endl;
}
//! Saves the SVM data/model (composed of the features, non-zero alphas (SVs) and the bias) to a file
//! \param model_filename The filename where to save data to
//! \param model The HostMatrix containing the non-zero alphas (SVs) and the features
//! \param b The hyperplane's bias
void saveModel(char * model_filename, GPUMLib::HostMatrix<cudafloat> &model, cudafloat b) {
if (DEBUG)
cout << "saving model to file..." << endl;
char * WRITE_BUF = new char[BUFFERING_BLOCK_SIZE];
FILE *model_file;
model_file = fopen(model_filename, "w");
if (model_file) {
//buffer process
setvbuf(model_file, WRITE_BUF, _IOFBF, BUFFERING_BLOCK_SIZE);
//first line gives the amount of support vectors
fprintf(model_file, "%d\n", model.Rows());
//second line gives the amount of features
fprintf(model_file, "%d\n", model.Columns() - 2);
//third line the hyperplane offset
fprintf(model_file, "%f\n", b);
//the remaining lines are in the form:
//alpha_i | class_i | attribute_0 ... attribute_n-1
for (int sv_i = 0; sv_i < model.Rows(); sv_i++) {
for (int col = 0; col < model.Columns(); col++) {
fprintf(model_file, "%f", model(sv_i, col));
if (col < model.Columns() - 1)
fprintf(model_file, ",");
}
fprintf(model_file, "\n");
}
fclose(model_file);
printf("model saved to file %d\n", model_file);
} else
cout << "Err: Unable to open model file for write." << endl;
delete[] WRITE_BUF;
}
//! Returns the amount of cores per SM depending on its architecture (taken from nvidia's cutil)
//! \param major The Major revision number of the reported CUDA support
//! \param minor The Minor revision number of the reported CUDA support
//! \return The amount of Cuda Cores / SPs
inline int convertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{{0x10, 8 },
{ 0x11, 8 },
{ 0x12, 8 },
{ 0x13, 8 },
{ 0x20, 32 },
{ 0x21, 48 },
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor) ) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
printf("MapSMtoCores undefined SMversion %d.%d!\n", major, minor);
return -1;
}
//! Automatically selects the fastest available compute device, if it fails to find one, it automatically aborts execution
void selectFastestDevice(){
int num_devices=0;
int device=0;
hipError_t count_status = hipGetDeviceCount(&num_devices);
cout << "found " << num_devices << " CUDA devices" << endl;
if(count_status==hipErrorNoDevice){
puts("hipErrorNoDevice");
}else if(count_status==hipErrorInsufficientDriver){
puts("hipErrorInsufficientDriver");
}
if (num_devices > 0) {
int max_multiprocessors = 0;
int fastest_device = -1;
// puts("-------------------");
for (device = 0; device < num_devices; device++) {
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, device);
//taken from RQ's source code (RAN.cpp)
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
fastest_device = device;
}
}
cout << "::: using CUDA device " << fastest_device << endl;
hipSetDevice(fastest_device);
}else{
cout << "no CUDA device available... aborting" << endl;
exit(-1);
}
}
//! Load SVM data (SVs and bias) from a file
//! \param model_filename The filename where to read data from
//! \param n_sv The number of Support Vectors in the file
//! \param ndims The number of features in the file
//! \param h_b The hyperplane's offset
//! \param model A HostMatrix containing the model (features and alphas)
//! \return 0 if successfully loaded the data, -1 otherwise
int readModel(char * model_filename, int &n_sv, int &ndims,
cudafloat &h_b, GPUMLib::HostMatrix<cudafloat> &model) {
cout << "loading model from file..." << endl;
ifstream model_file(model_filename);
if (model_file.is_open()) {
//first line tells the amount of SVs
model_file >> n_sv;
//second tells the amount of features
model_file >> ndims;
ndims = ndims - 2;
//third the hyperplanes offset
model_file >> h_b;
//create the model
model.ResizeWithoutPreservingData(n_sv, ndims);
for (int row = 0; row < model.Rows(); row++) {
for (int col = 0; col < model.Columns(); col++) {
cudafloat val;
model_file >> val;
model(row, col) = val;
}
}
model_file.close();
printf("read model from file %s with %d SVs and %d features\n", model_filename, n_sv, ndims);
return 0;
} else {
cout << "Err: Unable to open model file for reading." << endl;
return -1;
}
}
//! Helper function to return precision delta time for 3 counters since last call based upon host high performance counter.
//! Retrieved from shrUtils.h by NVIDIA.
//! \param iCounterID The counter to be used (0, 1 or 2)
//! \return The elapsed time since last call for the given counter
double shrDeltaT(int iCounterID) {
// local var for computation of microseconds since last call
double DeltaT = -1.0;
#ifdef _WIN32 // Windows version of precision host timer
// Variables that need to retain state between calls
static LARGE_INTEGER liOldCount0 = { { 0, 0 } };
static LARGE_INTEGER liOldCount1 = { { 0, 0 } };
static LARGE_INTEGER liOldCount2 = { { 0, 0 } };
// locals for new count, new freq and new time delta
LARGE_INTEGER liNewCount, liFreq;
if (QueryPerformanceFrequency(&liFreq)) {
// Get new counter reading
QueryPerformanceCounter(&liNewCount);
// Update the requested timer
switch (iCounterID) {
case 0: {
// Calculate time difference for timer 0. (zero when called the first time)
DeltaT = liOldCount0.LowPart ? (((double) liNewCount.QuadPart - (double) liOldCount0.QuadPart) / (double) liFreq.QuadPart) : 0.0;
// Reset old count to new
liOldCount0 = liNewCount;
break;
}
case 1: {
// Calculate time difference for timer 1. (zero when called the first time)
DeltaT = liOldCount1.LowPart ? (((double) liNewCount.QuadPart - (double) liOldCount1.QuadPart) / (double) liFreq.QuadPart) : 0.0;
// Reset old count to new
liOldCount1 = liNewCount;
break;
}
case 2: {
// Calculate time difference for timer 2. (zero when called the first time)
DeltaT = liOldCount2.LowPart ? (((double) liNewCount.QuadPart - (double) liOldCount2.QuadPart) / (double) liFreq.QuadPart) : 0.0;
// Reset old count to new
liOldCount2 = liNewCount;
break;
}
default: {
// Requested counter ID out of range
return -9999.0;
}
}
// Returns time difference in seconds since the last call
return DeltaT;
} else {
// No high resolution performance counter
return -9999.0;
}
#else
// Linux version of precision host timer. See http://www.informit.com/articles/article.aspx?p=23618&seqNum=8
static struct timeval _NewTime; // new wall clock time (struct representation in seconds and microseconds)
static struct timeval _OldTime0;// old wall clock time 0(struct representation in seconds and microseconds)
static struct timeval _OldTime1;// old wall clock time 1(struct representation in seconds and microseconds)
static struct timeval _OldTime2;// old wall clock time 2(struct representation in seconds and microseconds)
// Get new counter reading
gettimeofday(&_NewTime, NULL);
switch (iCounterID)
{
case 0:
{
// Calculate time difference for timer 0. (zero when called the first time)
DeltaT = ((double)_NewTime.tv_sec + 1.0e-6 * (double)_NewTime.tv_usec) - ((double)_OldTime0.tv_sec + 1.0e-6 * (double)_OldTime0.tv_usec);
// Reset old time 0 to new
_OldTime0.tv_sec = _NewTime.tv_sec;
_OldTime0.tv_usec = _NewTime.tv_usec;
break;
}
case 1:
{
// Calculate time difference for timer 1. (zero when called the first time)
DeltaT = ((double)_NewTime.tv_sec + 1.0e-6 * (double)_NewTime.tv_usec) - ((double)_OldTime1.tv_sec + 1.0e-6 * (double)_OldTime1.tv_usec);
// Reset old time 1 to new
_OldTime1.tv_sec = _NewTime.tv_sec;
_OldTime1.tv_usec = _NewTime.tv_usec;
break;
}
case 2:
{
// Calculate time difference for timer 2. (zero when called the first time)
DeltaT = ((double)_NewTime.tv_sec + 1.0e-6 * (double)_NewTime.tv_usec) - ((double)_OldTime2.tv_sec + 1.0e-6 * (double)_OldTime2.tv_usec);
// Reset old time 2 to new
_OldTime2.tv_sec = _NewTime.tv_sec;
_OldTime2.tv_usec = _NewTime.tv_usec;
break;
}
default:
{
// Requested counter ID out of range
return -9999.0;
}
}
// Returns time difference in seconds since the last call
return DeltaT;
#endif
}
//! Main function to launch the SVM, either to train, classify or both
int main(int argc, char **argv) {
//disable stdout buffering
setvbuf(stdout, NULL, _IONBF, 0);
setvbuf(stderr, NULL, _IONBF, 0);
bool train_model = false;
bool classify_dataset = false;
char * training_filename = NULL;
char * testing_filename = NULL;
char * model_filename = NULL;
char * classification_results_filename = NULL;
GPUMLib::svm_kernel_type kernel_type = SVM_KT_LINEAR;
cudafloat * kernel_args = new cudafloat[4];
kernel_args[0] = 1.0;
kernel_args[1] = 1.0;
kernel_args[2] = 1.0;
kernel_args[3] = 1.0;
cudafloat constant_c = CUDA_VALUE(1.0);
cudafloat constant_c_negative = constant_c;
cudafloat constant_c_positive = constant_c;
cudafloat constant_epsilon = CUDA_VALUE(0.00001);
cudafloat constant_tau = CUDA_VALUE(0.001);
int amount_threads = MAX_THREADS_PER_BLOCK;
bool arguments_error = false;
int positive_class = 1;
//read arguments and compile them
GPUMLib::Settings settings(argc, argv);
unsigned int aa = settings.getAmountArguments();
//go through all arguments
for (size_t i = 0; i < aa; i++) {
Argument* a = settings.getArgument(i);
//training file
if (strcmp(a->argument, "-trainingset") == 0) {
//cout << "extracting training file" << endl;
if (a->value != NULL) {
training_filename = a->value;
} else {
cout << "no training file was given" << endl;
arguments_error = true;
}
}
//classifying file
else if (strcmp(a->argument, "-testingset") == 0) {
//cout << "extracting testing file" << endl;
if (a->value != NULL) {
testing_filename = a->value;
} else {
cout << "no testing file was given" << endl;
arguments_error = true;
}
}
//classification results file
else if (strcmp(a->argument, "-cr") == 0) {
//cout << "extracting classification results file" << endl;
if (a->value != NULL) {
classification_results_filename = a->value;
} else {
cout << "no classification results file was given" << endl;
arguments_error = true;
}
}
//model file
else if (strcmp(a->argument, "-model") == 0) {
//cout << "extracting model file" << endl;
if (a->value != NULL) {
model_filename = a->value;
} else {
cout << "no model file given" << endl;
arguments_error = true;
}
}
//train?
else if (strcmp(a->argument, "-train") == 0) {
//cout << "user wants to train model" << endl;
train_model = true;
}
//classify?
else if (strcmp(a->argument, "-classify") == 0) {
//cout << "user wants to classify dataset" << endl;
classify_dataset = true;
}
//kernel type
else if (strcmp(a->argument, "-k") == 0) {
//cout << "extracting kernel type" << endl;
if (a->value != NULL) {
if (strcmp(a->value, "lin") == 0) {
kernel_type = SVM_KT_LINEAR;
} else if (strcmp(a->value, "pol") == 0) {
kernel_type = SVM_KT_POLYNOMIAL;
} else if (strcmp(a->value, "rbf") == 0) {
kernel_type = SVM_KT_RBF;
} else if (strcmp(a->value, "sig") == 0) {
kernel_type = SVM_KT_SIGMOID;
} else if (strcmp(a->value, "ukf") == 0) {
kernel_type = SVM_KT_UKF;
} else {
cout << "unknown kernel type: " << a->value << endl;
arguments_error = true;
}
} else {
cout << "no kernel type was given" << endl;
arguments_error = true;
}
}
//kernel arguments
//a
else if (strcmp(a->argument, "-a") == 0) {
//cout << "extracting argument <a>" << endl;
if (a->value != NULL) {
kernel_args[0] = (cudafloat) atof(a->value);
} else {
cout << "no argument <a> was given" << endl;
arguments_error = true;
}
}
//b
else if (strcmp(a->argument, "-b") == 0) {
//cout << "extracting argument <b>" << endl;
if (a->value != NULL) {
kernel_args[1] = (cudafloat) atof(a->value);
} else {
cout << "no argument <b> was given" << endl;
arguments_error = true;
}
}
//c
else if (strcmp(a->argument, "-c") == 0) {
//cout << "extracting argument <c>" << endl;
if (a->value != NULL) {
kernel_args[2] = (cudafloat) atof(a->value);
} else {
cout << "no argument <c> was given" << endl;
arguments_error = true;
}
}
//penalization constant
else if (strcmp(a->argument, "-C") == 0) {
//cout << "extracting penalization constant C" << endl;
if (a->value != NULL) {
constant_c_negative = (cudafloat) atof(a->value);
constant_c_positive = (cudafloat) atof(a->value);
} else {
cout << "no penalization constant was given" << endl;
arguments_error = true;
}
}
//negative penalization constant
else if (strcmp(a->argument, "-Cn") == 0) {
//cout << "extracting penalization constant C" << endl;
if (a->value != NULL) {
constant_c_negative = (cudafloat) atof(a->value);
} else {
cout << "no negative penalization constant was given" << endl;
arguments_error = true;
}
}
//positive penalization constant
else if (strcmp(a->argument, "-Cp") == 0) {
//cout << "extracting penalization constant C" << endl;
if (a->value != NULL) {
constant_c_positive = (cudafloat) atof(a->value);
} else {
cout << "no positive penalization constant was given" << endl;
arguments_error = true;
}
}
//optimality conditions tolerance
else if (strcmp(a->argument, "-eps") == 0) {
//cout << "extracting optimality conditions tolerance" << endl;
if (a->value != NULL) {
constant_epsilon = (cudafloat) atof(a->value);
} else {
cout << "no optimality conditions tolerance was given" << endl;
arguments_error = true;
}
}
//optimality gap size
else if (strcmp(a->argument, "-tau") == 0) {
//cout << "extracting optimality gap size" << endl;
if (a->value != NULL) {
constant_tau = (cudafloat) atof(a->value);
} else {
cout << "no optimality gap size was given" << endl;
arguments_error = true;
}
}
//amount of threads
else if (strcmp(a->argument, "-threads") == 0) {
//cout << "extracting amount of threads" << endl;
if (a->value != NULL) {
amount_threads = atoi(a->value);
} else {
cout << "no amount of threads was given" << endl;
arguments_error = true;
}
}
//positive class
else if (strcmp(a->argument, "-positive") == 0) {
if (a->value != NULL) {
positive_class = atoi(a->value);
} else {
cout << "positive label was not given" << endl;
arguments_error = true;
}
}
}
//for training we require the training dataset... duh
if (train_model) {
if (training_filename == NULL) {
cout << "Error: no training dataset was given - Aborting." << endl;
arguments_error = true;
} else {
//cout << "training dataset is " << training_filename << endl;
}
}
//for classifying we require both the training and testing datasets
if (classify_dataset) {
//if in this execution the model is not trained, it must be read from somewhere...
if (train_model == false && model_filename == NULL) {
cout << "Error: no model file was given." << endl;
return -1;
} else {
//cout << "model file is " << model_filename << endl;
}
if (testing_filename == NULL) {
cout << "Error: no testing dataset was given." << endl;
return -1;
} else {
//cout << "testing dataset is " << model_filename << endl;
}
}
if (classify_dataset == false && train_model == false) {
cout << "Error: the program was not instructed to train nor to classify." << endl;
arguments_error = true;
}
if (arguments_error) {
cout << "Error: invalid arguments." << endl;
cout << "----------------------------------------------------------" << endl;
cout << "The arguments are the following:" << endl;
cout << "" << endl;
cout << "to train using the training samples" << endl;
cout << "\t -train" << endl;
cout << "" << endl;
cout << "to classify using the trained svm model" << endl;
cout << "\t -classify" << endl;
cout << "" << endl;
cout << "file with the training set (filename) - required:" << endl;
cout << "\t -trainingset <training file>" << endl;
cout << "" << endl;
cout << "file with the testing set (filename) - required:" << endl;
cout << "\t -testingset <training file>" << endl;
cout << "" << endl;
cout << "file where to store the trained svm model (filename):" << endl;
cout << "\t -model <output file>" << endl;
cout << "" << endl;
cout << "file where to store the classification results (filename):" << endl;
cout << "\t -cr <output file>" << endl;
cout << "" << endl;
cout << "which kernel to use (text):" << endl;
cout << "\t -k <type>" << endl;
cout << "\t where <type> can be one of the following:" << endl;
cout << "\t\t lin - for the linear kernel: K(x1,x2) = x1.x2" << endl;
cout << "\t\t pol - for the polynomial kernel: K(x1,x2) = a*(x1.x2+b)^c" << endl;
cout << "\t\t rbf - for the gaussian kernel: K(x1,x2) = e^(-a*||x1-x2||^2)" << endl;
cout << "\t\t sig - for the sigmoid kernel: K(x1,x2) = tanh(a*(x1.x2)+b)" << endl;
cout << "\t\t ukf - for the universal function kernel: K(x1,x2) = a*(||x1-x2||^2+b^2)^-c" << endl;
cout << "\t being x1.x2 the dot product between vectors x1 and x2" << endl;
cout << "" << endl;
cout << "kernel arguments (decimal number):" << endl;
cout << "\t -a <value>" << endl;
cout << "\t -b <value>" << endl;
cout << "\t -c <value>" << endl;
cout << "" << endl;
cout << "penalization constant C (decimal number):" << endl;
cout << "\t -C <value>" << endl;
cout << "" << endl;
cout << "optimality conditions tolerance, Epsilon, which allows some numerical uncertainty on the heuristics (decimal number):" << endl;
cout << "\t -eps <value>" << endl;
cout << "" << endl;
cout << "optimality gap size, Tau, which regulates the training convergence (decimal number):" << endl;
cout << "\t -tau <value>" << endl;
cout << "" << endl;
cout << "amount of threads to use in trainer and classifier (integer, 0 = automatic):" << endl;
cout << "\t -threads <value>" << endl;
cout << "" << endl;
cout << "ABORTING." << endl;
return -1;
}
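// A hypothetical invocation (file names below are placeholders, not shipped with the code):
//   ./SVM -train -classify -trainingset train.csv -testingset test.csv -model svm.model -cr results.csv -k rbf -a 0.5 -C 10 -positive 1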
switch (kernel_type) {
case SVM_KT_RBF:
if (DEBUG)
cout << "using RBF kernel with gamma = " << kernel_args[0] << endl;
break;
case SVM_KT_LINEAR:
if (DEBUG)
cout << "using linear kernel" << endl;
break;
case SVM_KT_POLYNOMIAL:
if (DEBUG)
cout << "using polynomial kernel" << endl;
break;
case SVM_KT_SIGMOID:
if (DEBUG)
cout << "using sigmoid kernel" << endl;
break;
case SVM_KT_UKF:
if (DEBUG)
cout << "using universal kernel function with L = " << kernel_args[0] << " b (sigma) = " << kernel_args[1] << " and c (alpha) = " << kernel_args[2]
<< endl;
break;
}
if (constant_c_negative <= 0 || constant_c_positive <= 0) {
cout << "Error: invalid value for C" << endl;
return -1;
}
if (DEBUG)
cout << "C negative = " << constant_c_negative << " C positive = " << constant_c_positive << endl;
if (DEBUG)
cout << "epsilon = " << constant_epsilon << endl;
if (constant_tau <= 0) {
cout << "Error: invalid value for epsilon" << endl;
return -1;
}
if (DEBUG)
cout << "tau = " << constant_tau << endl;
// read training dataset file
// read training dataset file
// read training dataset file
//create a matrix to hold the model
//structure: alpha_i | class_i | attribute_0 ... attribute_n-1
GPUMLib::HostMatrix<cudafloat> h_model(1, 1, GPUMLib::ColumnMajor);
int n_sv = -1;
int ndims = -1;
cudafloat h_b = CUDA_VALUE(0.0);
selectFastestDevice();
//create a instance to manage the GPU SVM
GPUMLib::SVM svm;
//train model if requested
if (train_model) {
//build matrix for holding training data set
cout << "reading training dataset file " << training_filename << endl;
FILE *f_input = fopen(training_filename, "r");
if (f_input == NULL) {
cout << "error while reading training dataset file" << endl;
return -1;
}
ndims = getNumberOfColumns(f_input) - 1;
int training_dataset_size = getNumberOfSamples(f_input);
//cout << "allocating storage for training dataset:" << training_filename << endl;
//create the storage in hosts memory for the dataset
GPUMLib::HostMatrix<cudafloat> h_samples(training_dataset_size, ndims, GPUMLib::ColumnMajor);
GPUMLib::HostArray<int> h_classes(training_dataset_size);
// create data structures for storing alphas
GPUMLib::HostArray<cudafloat> h_alphas(training_dataset_size);
//read the dataset
readDataSet(f_input, h_samples, h_classes, ndims + 1, positive_class);
fclose(f_input);
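// shrDeltaT(1) returns the time elapsed since its previous call on counter 1,
// so it is called twice here to prime the counter; t1 - t0 below then spans only the training call.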
shrDeltaT(1);
shrDeltaT(1);
double t0 = shrDeltaT(1);
svm.train(h_samples, h_classes, constant_c_negative, constant_c_positive,
constant_epsilon, constant_tau, kernel_type, kernel_args, amount_threads, h_alphas,
//training_dataset_size,
n_sv, h_model,
//ndims,
h_b);
double t1 = shrDeltaT(1);
printf("training took %f s\n", t1 - t0);
//if requested save model to a file
if (model_filename != NULL) {
saveModel(model_filename, h_model, h_b);
}
}
if (classify_dataset) {
puts("------------------");
// if in this call the model hasn't been created, load it
if (!train_model) {
if (readModel(model_filename, n_sv, ndims, h_b, h_model) != 0) {
cout << "error while reading model" << endl;
return -1;
}
}
printf("using model with %d SVs and %d features\n", n_sv, ndims);
// read testing dataset file
// read testing dataset file
// read testing dataset file
//build matrix for holding testing data set
cout << "reading testing dataset file " << testing_filename << endl;
FILE *f_input_test = fopen(testing_filename, "r");
if (f_input_test == NULL) {
cout << "error while reading testing dataset file" << endl;
return -1;
}
int testing_dataset_size = getNumberOfSamples(f_input_test);
GPUMLib::HostMatrix<cudafloat> h_testing_samples(testing_dataset_size, ndims, GPUMLib::ColumnMajor);
GPUMLib::HostArray<int> h_testing_classes(testing_dataset_size);
//read the dataset
readDataSet(f_input_test, h_testing_samples, h_testing_classes, ndims + 1, positive_class);
fclose(f_input_test);
// start classifying phase
shrDeltaT(1);
shrDeltaT(1);
double t0 = shrDeltaT(1);
GPUMLib::HostArray<int> h_testing_results(testing_dataset_size);
svm.classify(h_model, h_testing_samples, kernel_args, amount_threads, kernel_type, n_sv, h_b, ndims, h_testing_results);
double t1 = shrDeltaT(1);
printf("classification took %f s\n", t1 - t0);
// for (int i=0;i<h_testing_results.Length();i++){
// printf("target\t%d\tpredicted\t%d\n", h_testing_classes[i], h_testing_results[i]);
// }
showClassificationMetrics(h_testing_classes, h_testing_results, testing_dataset_size);
//if requested save results to a file
if (classification_results_filename != NULL) {
if (DEBUG)
cout << "saving classification results to file " << classification_results_filename << endl;
char * WRITE_BUF = new char[BUFFERING_BLOCK_SIZE];
FILE *model_file;
model_file = fopen(classification_results_filename, "w");
if (model_file) {
//buffer process
setvbuf(model_file, WRITE_BUF, _IOFBF, BUFFERING_BLOCK_SIZE);
//give the amount of samples
fprintf(model_file, "#%d\n", testing_dataset_size);
//first line gives a comment
fprintf(model_file, "target,predicted\n");
for (int i = 0; i < testing_dataset_size; i++) {
fprintf(model_file, "%d,%d\n", h_testing_classes[i], h_testing_results[i]);
}
fclose(model_file);
} else
cout << "Err: Unable to open classification results file for write." << endl;
delete[] WRITE_BUF;
}
}
delete[] kernel_args;
if (DEBUG)
cout << "exiting..." << endl;
return 0;
}
} //namespace
int main(int argc, char **argv){
return GPUMLib::main(argc, argv);
}
|
d8c9e46c30ac14834cf7ea45b57f9d1eac980702.cu
|
/*
Joao Goncalves is a MSc Student at the University of Coimbra, Portugal
Copyright (C) 2012 Joao Goncalves
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//this example implements a SVM using the GPU. Can be used to train and classify binary datasets
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
#include <assert.h>
#include <float.h>
#include <fstream>
#include <iostream>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
//GPUMLib stuff
#include "../../common/CudaDefinitions.h"
#include "../../common/Utilities.h"
#include "../../memory/DeviceArray.h"
#include "../../memory/DeviceMatrix.h"
#include "../../memory/HostArray.h"
#include "../../memory/HostMatrix.h"
#include <cuda.h>
//! Comment or set this macro to zero to disable some runtime debugging info
#define DEBUG 1
#include "../../SVM/Settings.h"
#include "../../SVM/svm_kernel_type.h"
//SVM encapsulating class
#include "../../SVM/SVM.h"
using namespace std;
namespace GPUMLib {
typedef unsigned int uint;
//! Value separator tag for the CSV files
#define VALUE_SEPARATOR ",;"
//! Size of blocks (in elements) for reading/writing operations
#define BUFFERING_BLOCK_SIZE (1<<24)
#ifndef BUFFERING_BLOCK_SIZE
#define BUFFERING_BLOCK_SIZE (1<<20)
#endif
/**
* helper function for validLine()
*/
bool validCharacter(char &c) {
if (c >= '0' && c <= '9')
return true;
if (c == ',' || c == '.' || c == ' ' || c == '\r' || c == '\n' || c == '-' || c == '+' || c == 'e' || c == 'E')
return true;
return false;
}
/**
* check if a line from the dataset is valid
* a line is valid if it only contains valid characters
*/
bool validLine(char * buf, int size) {
for (int i = 0; i < size; i++) {
char c = buf[i];
if (c == 0)
return true;
if (!validCharacter(c))
return false;
}
return true;
}
/**
* Counts the amount of samples (lines) in given comma separated file (CSV) file
* @param f The file to be used
* @return The number of samples in the file
*/
int getNumberOfSamples(FILE *f) {
//TODO: remove that dirty way using valid characters (because of e/E)
//start... from the beginning
fseek(f, 0, SEEK_SET);
//read
char * buf = new char[BUFFERING_BLOCK_SIZE];
int count = 0;
while (fgets(buf, BUFFERING_BLOCK_SIZE, f)) {
if (validLine(buf, BUFFERING_BLOCK_SIZE))
count++;
}
// if (DEBUG)
// cout << "Number of samples:\t" << count << endl;
delete[] buf; // buf was allocated with new[]
return count;
}
/**
* Counts the amount of lines (\n) in given file
* @param f The file to be used
* @return The number of lines in the file
*/
int getNumberOfLines(FILE *f) {
int count = 0;
//start from the beginning
fseek(f, 0, SEEK_SET);
//read
char * buf = new char[BUFFERING_BLOCK_SIZE];
for (;;) {
//read a nice chunk (to minimize head seek overhead)
size_t amount_read = fread(buf, sizeof(char), BUFFERING_BLOCK_SIZE, f);
if (amount_read == 0)
break;
//count occurrences of '\n' in that chunk
for (size_t i = 0; i < amount_read; i++) {
if (buf[i] == '\n')
count++;
}
}
delete[] buf; // buf was allocated with new[]
// if (DEBUG)
// cout << "Number of lines:\t" << count << endl;
return count;
}
/**
* Counts the amount of columns in first line of given CSV file.
* @param f The file to be used
* @return The number of columns in the file
*/
int getNumberOfColumns(FILE *f) {
//start from the beginning
fseek(f, 0, SEEK_SET);
//temporary storage
char * buf = new char[BUFFERING_BLOCK_SIZE];
//eat empty lines
bool gotvalidline = false;
while (!gotvalidline) {
fgets(buf, BUFFERING_BLOCK_SIZE, f);
if (buf[0] != '\n' && validLine(buf, BUFFERING_BLOCK_SIZE))
gotvalidline = true;
}
//eat first value
char* tok = strtok(buf, VALUE_SEPARATOR);
int num_columns = 1;
//count next values until the end of the line
while ((tok = strtok(NULL, VALUE_SEPARATOR)) != NULL)
num_columns++;
// if (DEBUG)
// cout << "Number of columns:\t" << num_columns << endl;
return num_columns;
}
/**
* Reads a CSV file as a dataset.
* @param f The file to be read
* @param samples The HostMatrix where to store the attributes/features for each sample/pattern
* @param classes The HostArray where to store the class of each sample/pattern
* @param ncols The number of columns in the CSV file (must be previously obtained)
* @param positive_class The value to be used to identify the positive class. The other values will be used as the negative class
*/
void readDataSet(FILE *f, GPUMLib::HostMatrix<cudafloat> & samples, GPUMLib::HostArray<int> & classes, int ncols, int positive_class) {
//start from the beginning
fseek(f, 0, SEEK_SET);
//read
char * buf = new char[BUFFERING_BLOCK_SIZE];
int row = 0;
int positives = 0;
int negatives = 0;
while (fgets(buf, BUFFERING_BLOCK_SIZE, f)) {
if (!validLine(buf, BUFFERING_BLOCK_SIZE))
continue;
//strrplchr(buf, ',', '.'); // replace , by .
//get first feature and convert to numeric
char *tok = strtok(buf, VALUE_SEPARATOR);
double val = strtod(tok, NULL); // atoi IS SLOWER!
samples(row, 0) = (float) val;
//do the same for the remaining features
for (int col = 1; col < ncols - 1; col++) {
tok = strtok(NULL, VALUE_SEPARATOR);
val = strtod(tok, NULL);
// store value
samples(row, col) = (float) val;
}
// get the class
tok = strtok(NULL, VALUE_SEPARATOR);
int c = strtol(tok, NULL, 10);
//we expect the class label to belong to {-1;1}
if (c == positive_class) {
classes[row] = 1;
positives++;
} else {
classes[row] = -1;
negatives++;
}
row++;
}
if (DEBUG) {
cout << "read dataset with " << row << " rows and " << ncols << " columns" << endl;
cout << "dataset with " << positives << " positives and " << negatives << " negatives" << endl;
cout << "positive to negative ratio is " << (double) positives / (double) negatives << endl;
}
}
//! Prints various classification metrics such as the Confusion Matrix, Accuracy, F-Score, etc.
//! \param targets A HostArray containing the target classes (real data)
//! \param predicted A HostArray containing the predicted classes (output of the classifier)
//! \param length The sample size
void showClassificationMetrics(GPUMLib::HostArray<int> &targets, GPUMLib::HostArray<int> &predicted, int length) {
//confusion matrix
int tp = 0;
int fp = 0;
int tn = 0;
int fn = 0;
int errors = 0;
//#pragma omp parallel for reduction(+:errors,tp,fp,tn,fn)
for (int i = 0; i < length; i++) {
//confusion matrix
if (predicted[i] == -1) {
if (targets[i] == -1) {
//TN
tn++;
} else {
//FN
fn++;
}
} else {
if (targets[i] == -1) {
//FP
fp++;
} else {
//TP
tp++;
}
}
int class_err = targets[i] - predicted[i];
if (class_err != 0)
errors++;
}
cout << "Confusion matrix:" << endl;
cout << "\t\t\tActual class" << endl;
cout << "\t\t\t-1\t1" << endl;
cout << "Predicted class\t-1\t" << tn << "\t" << fn << endl;
cout << "\t\t1\t" << fp << "\t" << tp << endl;
double precision = ((tp + fp) == 0 ? 0 : (double) (tp) / (double) (tp + fp));
cout << "Precision: " << precision << endl;
double recall = ((fn + tp) == 0 ? 0 : (double) (tp) / (double) (fn + tp));
cout << "Recall: " << recall << endl;
double false_positive_rate = ((fp + tn) == 0 ? 0 : (double) (fp) / (double) (fp + tn));
cout << "False Positive Rate: " << false_positive_rate << endl;
cout << "Specificity: " << 1.0 - false_positive_rate << endl;
cout << "False Discovery Rate: " << ((fp + tp) == 0?0:(double) (fp) / (double) (fp + tp)) << endl;
cout << "Accuracy: " << ((tp + tn + fp + fn) == 0?0:(double) (tp + tn) / (double) (tp + tn + fp + fn)) << endl;
cout << "F-score: " << ((recall + precision) <FLT_MIN?0:(2.0 * recall * precision) / (recall + precision)) << endl;
cout << "testing errors were " << errors << "/" << length << " = " << (double) errors / (double) length << endl;
}
//! Saves the SVM data/model (composed of the features, non-zero alphas (SVs) and the bias) to a file
//! \param model_filename The filename where to save data to
//! \param model The HostMatrix containing the non-zero alphas (SVs) and the features
//! \param b The hyperplane's bias
void saveModel(char * model_filename, GPUMLib::HostMatrix<cudafloat> &model, cudafloat b) {
if (DEBUG)
cout << "saving model to file..." << endl;
char * WRITE_BUF = new char[BUFFERING_BLOCK_SIZE];
FILE *model_file;
model_file = fopen(model_filename, "w");
if (model_file) {
//buffer process
setvbuf(model_file, WRITE_BUF, _IOFBF, BUFFERING_BLOCK_SIZE);
//first line gives the amount of support vectors
fprintf(model_file, "%d\n", model.Rows());
//second line gives the amount of features
fprintf(model_file, "%d\n", model.Columns() - 2);
//third line the hyperplane offset
fprintf(model_file, "%f\n", b);
//the remaining lines are in the form:
//alpha_i | class_i | attribute_0 ... attribute_n-1
for (int sv_i = 0; sv_i < model.Rows(); sv_i++) {
for (int col = 0; col < model.Columns(); col++) {
fprintf(model_file, "%f", model(sv_i, col));
if (col < model.Columns() - 1)
fprintf(model_file, ",");
}
fprintf(model_file, "\n");
}
fclose(model_file);
printf("model saved to file %d\n", model_file);
} else
cout << "Err: Unable to open model file for write." << endl;
delete[] WRITE_BUF;
}
//! Returns the amount of cores per SM depending on its architecture (taken from nvidia's cutil)
//! \param major The Major revision number of the reported CUDA support
//! \param minor The Minor revision number of the reported CUDA support
//! \return The amount of Cuda Cores / SPs
inline int convertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
typedef struct {
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{{0x10, 8 },
{ 0x11, 8 },
{ 0x12, 8 },
{ 0x13, 8 },
{ 0x20, 32 },
{ 0x21, 48 },
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor) ) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
printf("MapSMtoCores undefined SMversion %d.%d!\n", major, minor);
return -1;
}
//! Automatically selects the fastest available compute device, if it fails to find one, it automatically aborts execution
void selectFastestDevice(){
int num_devices=0;
int device=0;
cudaError_t count_status = cudaGetDeviceCount(&num_devices);
cout << "found " << num_devices << " CUDA devices" << endl;
if(count_status==cudaErrorNoDevice){
puts("cudaErrorNoDevice");
}else if(count_status==cudaErrorInsufficientDriver){
puts("cudaErrorInsufficientDriver");
}
if (num_devices > 0) {
int max_multiprocessors = 0;
int fastest_device = -1;
// puts("-------------------");
for (device = 0; device < num_devices; device++) {
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, device);
//taken from RQ's source code (RAN.cpp)
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
fastest_device = device;
}
}
cout << "::: using CUDA device " << fastest_device << endl;
cudaSetDevice(fastest_device);
}else{
cout << "no CUDA device available... aborting" << endl;
exit(-1);
}
}
//! Load SVM data (SVs and bias) from a file
//! \param model_filename The filename where to read data from
//! \param n_sv The number of Support Vectors in the file
//! \param ndims The number of features in the file
//! \param h_b The hyperplane's offset
//! \param model A HostMatrix containing the model (features and alphas)
//! \return 0 if successfully loaded the data, -1 otherwise
int readModel(char * model_filename, int &n_sv, int &ndims,
cudafloat &h_b, GPUMLib::HostMatrix<cudafloat> &model) {
cout << "loading model from file..." << endl;
ifstream model_file(model_filename);
if (model_file.is_open()) {
//first line tells the amount of SVs
model_file >> n_sv;
//second tells the amount of features
model_file >> ndims;
ndims = ndims - 2;
//third the hyperplanes offset
model_file >> h_b;
//create the model
model.ResizeWithoutPreservingData(n_sv, ndims);
for (int row = 0; row < model.Rows(); row++) {
for (int col = 0; col < model.Columns(); col++) {
cudafloat val;
model_file >> val;
model(row, col) = val;
}
}
model_file.close();
printf("read model from file %s with %d SVs and %d features\n", model_filename, n_sv, ndims);
return 0;
} else {
cout << "Err: Unable to open model file for reading." << endl;
return -1;
}
}
//! Helper function to return precision delta time for 3 counters since last call based upon host high performance counter.
//! Retrieved from shrUtils.h by NVIDIA.
//! \param iCounterID The counter to be used (0, 1 or 2)
//! \return The elapsed time since last call for the given counter
double shrDeltaT(int iCounterID) {
// local var for computation of microseconds since last call
double DeltaT = -1.0;
#ifdef _WIN32 // Windows version of precision host timer
// Variables that need to retain state between calls
static LARGE_INTEGER liOldCount0 = { { 0, 0 } };
static LARGE_INTEGER liOldCount1 = { { 0, 0 } };
static LARGE_INTEGER liOldCount2 = { { 0, 0 } };
// locals for new count, new freq and new time delta
LARGE_INTEGER liNewCount, liFreq;
if (QueryPerformanceFrequency(&liFreq)) {
// Get new counter reading
QueryPerformanceCounter(&liNewCount);
// Update the requested timer
switch (iCounterID) {
case 0: {
// Calculate time difference for timer 0. (zero when called the first time)
DeltaT = liOldCount0.LowPart ? (((double) liNewCount.QuadPart - (double) liOldCount0.QuadPart) / (double) liFreq.QuadPart) : 0.0;
// Reset old count to new
liOldCount0 = liNewCount;
break;
}
case 1: {
// Calculate time difference for timer 1. (zero when called the first time)
DeltaT = liOldCount1.LowPart ? (((double) liNewCount.QuadPart - (double) liOldCount1.QuadPart) / (double) liFreq.QuadPart) : 0.0;
// Reset old count to new
liOldCount1 = liNewCount;
break;
}
case 2: {
// Calculate time difference for timer 2. (zero when called the first time)
DeltaT = liOldCount2.LowPart ? (((double) liNewCount.QuadPart - (double) liOldCount2.QuadPart) / (double) liFreq.QuadPart) : 0.0;
// Reset old count to new
liOldCount2 = liNewCount;
break;
}
default: {
// Requested counter ID out of range
return -9999.0;
}
}
// Returns time difference in seconds since the last call
return DeltaT;
} else {
// No high resolution performance counter
return -9999.0;
}
#else
// Linux version of precision host timer. See http://www.informit.com/articles/article.aspx?p=23618&seqNum=8
static struct timeval _NewTime; // new wall clock time (struct representation in seconds and microseconds)
static struct timeval _OldTime0;// old wall clock time 0(struct representation in seconds and microseconds)
static struct timeval _OldTime1;// old wall clock time 1(struct representation in seconds and microseconds)
static struct timeval _OldTime2;// old wall clock time 2(struct representation in seconds and microseconds)
// Get new counter reading
gettimeofday(&_NewTime, NULL);
switch (iCounterID)
{
case 0:
{
// Calculate time difference for timer 0. (zero when called the first time)
DeltaT = ((double)_NewTime.tv_sec + 1.0e-6 * (double)_NewTime.tv_usec) - ((double)_OldTime0.tv_sec + 1.0e-6 * (double)_OldTime0.tv_usec);
// Reset old time 0 to new
_OldTime0.tv_sec = _NewTime.tv_sec;
_OldTime0.tv_usec = _NewTime.tv_usec;
break;
}
case 1:
{
// Calculate time difference for timer 1. (zero when called the first time)
DeltaT = ((double)_NewTime.tv_sec + 1.0e-6 * (double)_NewTime.tv_usec) - ((double)_OldTime1.tv_sec + 1.0e-6 * (double)_OldTime1.tv_usec);
// Reset old time 1 to new
_OldTime1.tv_sec = _NewTime.tv_sec;
_OldTime1.tv_usec = _NewTime.tv_usec;
break;
}
case 2:
{
// Calculate time difference for timer 2. (zero when called the first time)
DeltaT = ((double)_NewTime.tv_sec + 1.0e-6 * (double)_NewTime.tv_usec) - ((double)_OldTime2.tv_sec + 1.0e-6 * (double)_OldTime2.tv_usec);
// Reset old time 2 to new
_OldTime2.tv_sec = _NewTime.tv_sec;
_OldTime2.tv_usec = _NewTime.tv_usec;
break;
}
default:
{
// Requested counter ID out of range
return -9999.0;
}
}
// Returns time difference in seconds since the last call
return DeltaT;
#endif
}
//! Main function to launch the SVM, either to train, classify or both
int main(int argc, char **argv) {
//disable stdout buffering
setvbuf(stdout, NULL, _IONBF, 0);
setvbuf(stderr, NULL, _IONBF, 0);
bool train_model = false;
bool classify_dataset = false;
char * training_filename = NULL;
char * testing_filename = NULL;
char * model_filename = NULL;
char * classification_results_filename = NULL;
GPUMLib::svm_kernel_type kernel_type = SVM_KT_LINEAR;
cudafloat * kernel_args = new cudafloat[4];
kernel_args[0] = 1.0;
kernel_args[1] = 1.0;
kernel_args[2] = 1.0;
kernel_args[3] = 1.0;
cudafloat constant_c = CUDA_VALUE(1.0);
cudafloat constant_c_negative = constant_c;
cudafloat constant_c_positive = constant_c;
cudafloat constant_epsilon = CUDA_VALUE(0.00001);
cudafloat constant_tau = CUDA_VALUE(0.001);
int amount_threads = MAX_THREADS_PER_BLOCK;
bool arguments_error = false;
int positive_class = 1;
//read arguments and compile them
GPUMLib::Settings settings(argc, argv);
unsigned int aa = settings.getAmountArguments();
//go through all arguments
for (size_t i = 0; i < aa; i++) {
Argument* a = settings.getArgument(i);
//training file
if (strcmp(a->argument, "-trainingset") == 0) {
//cout << "extracting training file" << endl;
if (a->value != NULL) {
training_filename = a->value;
} else {
cout << "no training file was given" << endl;
arguments_error = true;
}
}
//classifying file
else if (strcmp(a->argument, "-testingset") == 0) {
//cout << "extracting testing file" << endl;
if (a->value != NULL) {
testing_filename = a->value;
} else {
cout << "no testing file was given" << endl;
arguments_error = true;
}
}
//classification results file
else if (strcmp(a->argument, "-cr") == 0) {
//cout << "extracting classification results file" << endl;
if (a->value != NULL) {
classification_results_filename = a->value;
} else {
cout << "no classification results file was given" << endl;
arguments_error = true;
}
}
//model file
else if (strcmp(a->argument, "-model") == 0) {
//cout << "extracting model file" << endl;
if (a->value != NULL) {
model_filename = a->value;
} else {
cout << "no model file given" << endl;
arguments_error = true;
}
}
//train?
else if (strcmp(a->argument, "-train") == 0) {
//cout << "user wants to train model" << endl;
train_model = true;
}
//classify?
else if (strcmp(a->argument, "-classify") == 0) {
//cout << "user wants to classify dataset" << endl;
classify_dataset = true;
}
//kernel type
else if (strcmp(a->argument, "-k") == 0) {
//cout << "extracting kernel type" << endl;
if (a->value != NULL) {
if (strcmp(a->value, "lin") == 0) {
kernel_type = SVM_KT_LINEAR;
} else if (strcmp(a->value, "pol") == 0) {
kernel_type = SVM_KT_POLYNOMIAL;
} else if (strcmp(a->value, "rbf") == 0) {
kernel_type = SVM_KT_RBF;
} else if (strcmp(a->value, "sig") == 0) {
kernel_type = SVM_KT_SIGMOID;
} else if (strcmp(a->value, "ukf") == 0) {
kernel_type = SVM_KT_UKF;
} else {
cout << "unknown kernel type: " << a->value << endl;
arguments_error = true;
}
} else {
cout << "no kernel type was given" << endl;
arguments_error = true;
}
}
//kernel arguments
//a
else if (strcmp(a->argument, "-a") == 0) {
//cout << "extracting argument <a>" << endl;
if (a->value != NULL) {
kernel_args[0] = (cudafloat) atof(a->value);
} else {
cout << "no argument <a> was given" << endl;
arguments_error = true;
}
}
//b
else if (strcmp(a->argument, "-b") == 0) {
//cout << "extracting argument <b>" << endl;
if (a->value != NULL) {
kernel_args[1] = (cudafloat) atof(a->value);
} else {
cout << "no argument <b> was given" << endl;
arguments_error = true;
}
}
//c
else if (strcmp(a->argument, "-c") == 0) {
//cout << "extracting argument <c>" << endl;
if (a->value != NULL) {
kernel_args[2] = (cudafloat) atof(a->value);
} else {
cout << "no argument <c> was given" << endl;
arguments_error = true;
}
}
//penalization constant
else if (strcmp(a->argument, "-C") == 0) {
//cout << "extracting penalization constant C" << endl;
if (a->value != NULL) {
constant_c_negative = (cudafloat) atof(a->value);
constant_c_positive = (cudafloat) atof(a->value);
} else {
cout << "no penalization constant was given" << endl;
arguments_error = true;
}
}
//negative penalization constant
else if (strcmp(a->argument, "-Cn") == 0) {
//cout << "extracting penalization constant C" << endl;
if (a->value != NULL) {
constant_c_negative = (cudafloat) atof(a->value);
} else {
cout << "no negative penalization constant was given" << endl;
arguments_error = true;
}
}
//positive penalization constant
else if (strcmp(a->argument, "-Cp") == 0) {
//cout << "extracting penalization constant C" << endl;
if (a->value != NULL) {
constant_c_positive = (cudafloat) atof(a->value);
} else {
cout << "no positive penalization constant was given" << endl;
arguments_error = true;
}
}
//optimality conditions tolerance
else if (strcmp(a->argument, "-eps") == 0) {
//cout << "extracting optimality conditions tolerance" << endl;
if (a->value != NULL) {
constant_epsilon = (cudafloat) atof(a->value);
} else {
cout << "no optimality conditions tolerance was given" << endl;
arguments_error = true;
}
}
//optimality gap size
else if (strcmp(a->argument, "-tau") == 0) {
//cout << "extracting optimality gap size" << endl;
if (a->value != NULL) {
constant_tau = (cudafloat) atof(a->value);
} else {
cout << "no optimality gap size was given" << endl;
arguments_error = true;
}
}
//amount of threads
else if (strcmp(a->argument, "-threads") == 0) {
//cout << "extracting amount of threads" << endl;
if (a->value != NULL) {
amount_threads = atoi(a->value);
} else {
cout << "no amount of threads was given" << endl;
arguments_error = true;
}
}
//positive class
else if (strcmp(a->argument, "-positive") == 0) {
if (a->value != NULL) {
positive_class = atoi(a->value);
} else {
cout << "positive label was not given" << endl;
arguments_error = true;
}
}
}
//for training we require the training dataset... duh
if (train_model) {
if (training_filename == NULL) {
cout << "Error: no training dataset was given - Aborting." << endl;
arguments_error = true;
} else {
//cout << "training dataset is " << training_filename << endl;
}
}
//for classifying we require both the training and testing datasets
if (classify_dataset) {
//if in this execution the model is not trained, it must be read from somewhere...
if (train_model == false && model_filename == NULL) {
cout << "Error: no model file was given." << endl;
return -1;
} else {
//cout << "model file is " << model_filename << endl;
}
if (testing_filename == NULL) {
cout << "Error: no testing dataset was given." << endl;
return -1;
} else {
//cout << "testing dataset is " << model_filename << endl;
}
}
if (classify_dataset == false && train_model == false) {
cout << "Error: the program was not instructed to train nor to classify." << endl;
arguments_error = true;
}
if (arguments_error) {
cout << "Error: invalid arguments." << endl;
cout << "----------------------------------------------------------" << endl;
cout << "The arguments are the following:" << endl;
cout << "" << endl;
cout << "to train using the training samples" << endl;
cout << "\t -train" << endl;
cout << "" << endl;
cout << "to classify using the trained svm model" << endl;
cout << "\t -classify" << endl;
cout << "" << endl;
cout << "file with the training set (filename) - required:" << endl;
cout << "\t -trainingset <training file>" << endl;
cout << "" << endl;
cout << "file with the testing set (filename) - required:" << endl;
cout << "\t -testingset <training file>" << endl;
cout << "" << endl;
cout << "file where to store the trained svm model (filename):" << endl;
cout << "\t -model <output file>" << endl;
cout << "" << endl;
cout << "file where to store the classification results (filename):" << endl;
cout << "\t -cr <output file>" << endl;
cout << "" << endl;
cout << "which kernel to use (text):" << endl;
cout << "\t -k <type>" << endl;
cout << "\t where <type> can be one of the following:" << endl;
cout << "\t\t lin - for the linear kernel: K(x1,x2) = x1.x2" << endl;
cout << "\t\t pol - for the polynomial kernel: K(x1,x2) = a*(x1.x2+b)^c" << endl;
cout << "\t\t rbf - for the gaussian kernel: K(x1,x2) = e^(-a*||x1-x2||^2)" << endl;
cout << "\t\t sig - for the sigmoid kernel: K(x1,x2) = tanh(a*(x1.x2)+b)" << endl;
cout << "\t\t ukf - for the universal function kernel: K(x1,x2) = a*(||x1-x2||^2+b^2)^-c" << endl;
cout << "\t being x1.x2 the dot product between vectors x1 and x2" << endl;
cout << "" << endl;
cout << "kernel arguments (decimal number):" << endl;
cout << "\t -a <value>" << endl;
cout << "\t -b <value>" << endl;
cout << "\t -c <value>" << endl;
cout << "" << endl;
cout << "penalization constant C (decimal number):" << endl;
cout << "\t -C <value>" << endl;
cout << "" << endl;
cout << "optimality conditions tolerance, Epsilon, which allows some numerical uncertainty on the heuristics (decimal number):" << endl;
cout << "\t -eps <value>" << endl;
cout << "" << endl;
cout << "optimality gap size, Tau, which regulates the training convergence (decimal number):" << endl;
cout << "\t -tau <value>" << endl;
cout << "" << endl;
cout << "amount of threads to use in trainer and classifier (integer, 0 = automatic):" << endl;
cout << "\t -threads <value>" << endl;
cout << "" << endl;
cout << "ABORTING." << endl;
return -1;
}
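// Illustrative invocation (the binary name and file names below are placeholders, not taken from this code):
//   ./gpumlib_svm -train -classify -trainingset train.txt -testingset test.txt \
//                 -model svm.model -cr results.csv -k rbf -a 0.5 -C 10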
switch (kernel_type) {
case SVM_KT_RBF:
if (DEBUG)
cout << "using RBF kernel with gamma = " << kernel_args[0] << endl;
break;
case SVM_KT_LINEAR:
if (DEBUG)
cout << "using linear kernel" << endl;
break;
case SVM_KT_POLYNOMIAL:
if (DEBUG)
cout << "using polynomial kernel" << endl;
break;
case SVM_KT_SIGMOID:
if (DEBUG)
cout << "using sigmoid kernel" << endl;
break;
case SVM_KT_UKF:
if (DEBUG)
cout << "using universal kernel function with L = " << kernel_args[0] << " b (sigma) = " << kernel_args[1] << " and c (alpha) = " << kernel_args[2]
<< endl;
break;
}
if (constant_c_negative <= 0 || constant_c_positive <= 0) {
cout << "Error: invalid value for C" << endl;
return -1;
}
if (DEBUG)
cout << "C negative = " << constant_c_negative << " C positive = " << constant_c_positive << endl;
if (DEBUG)
cout << "epsilon = " << constant_epsilon << endl;
if (constant_tau <= 0) {
cout << "Error: invalid value for epsilon" << endl;
return -1;
}
if (DEBUG)
cout << "tau = " << constant_tau << endl;
// read training dataset file
//create a matrix to hold the model
//structure: alpha_i | class_i | attribute_0 ... attribute_n-1
GPUMLib::HostMatrix<cudafloat> h_model(1, 1, GPUMLib::ColumnMajor);
int n_sv = -1;
int ndims = -1;
cudafloat h_b = CUDA_VALUE(0.0);
selectFastestDevice();
//create a instance to manage the GPU SVM
GPUMLib::SVM svm;
//train model if requested
if (train_model) {
//build matrix for holding training data set
cout << "reading training dataset file " << training_filename << endl;
FILE *f_input = fopen(training_filename, "r");
if (f_input == NULL) {
cout << "error while reading training dataset file" << endl;
return -1;
}
ndims = getNumberOfColumns(f_input) - 1;
int training_dataset_size = getNumberOfSamples(f_input);
//cout << "allocating storage for training dataset:" << training_filename << endl;
//create the storage in hosts memory for the dataset
GPUMLib::HostMatrix<cudafloat> h_samples(training_dataset_size, ndims, GPUMLib::ColumnMajor);
GPUMLib::HostArray<int> h_classes(training_dataset_size);
// create data structures for storing alphas
GPUMLib::HostArray<cudafloat> h_alphas(training_dataset_size);
//read the dataset
readDataSet(f_input, h_samples, h_classes, ndims + 1, positive_class);
fclose(f_input);
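// shrDeltaT(1) returns the time elapsed since its previous call on counter 1; the two
// priming calls below reset the counter so that t1 - t0 measures only the svm.train() call.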
shrDeltaT(1);
shrDeltaT(1);
double t0 = shrDeltaT(1);
svm.train(h_samples, h_classes, constant_c_negative, constant_c_positive,
constant_epsilon, constant_tau, kernel_type, kernel_args, amount_threads, h_alphas,
//training_dataset_size,
n_sv, h_model,
//ndims,
h_b);
double t1 = shrDeltaT(1);
printf("training took %f s\n", t1 - t0);
//if requested save model to a file
if (model_filename != NULL) {
saveModel(model_filename, h_model, h_b);
}
}
if (classify_dataset) {
puts("------------------");
// if in this call the model hasn't been created, load it
if (!train_model) {
if (readModel(model_filename, n_sv, ndims, h_b, h_model) != 0) {
cout << "error while reading model" << endl;
return -1;
}
}
printf("using model with %d SVs and %d features\n", n_sv, ndims);
// read testing dataset file
//build matrix for holding testing data set
cout << "reading testing dataset file " << testing_filename << endl;
FILE *f_input_test = fopen(testing_filename, "r");
if (f_input_test == NULL) {
cout << "error while reading testing dataset file" << endl;
return -1;
}
int testing_dataset_size = getNumberOfSamples(f_input_test);
GPUMLib::HostMatrix<cudafloat> h_testing_samples(testing_dataset_size, ndims, GPUMLib::ColumnMajor);
GPUMLib::HostArray<int> h_testing_classes(testing_dataset_size);
//read the dataset
readDataSet(f_input_test, h_testing_samples, h_testing_classes, ndims + 1, positive_class);
fclose(f_input_test);
// start classifying phase
shrDeltaT(1);
shrDeltaT(1);
double t0 = shrDeltaT(1);
GPUMLib::HostArray<int> h_testing_results(testing_dataset_size);
svm.classify(h_model, h_testing_samples, kernel_args, amount_threads, kernel_type, n_sv, h_b, ndims, h_testing_results);
double t1 = shrDeltaT(1);
printf("classification took %f s\n", t1 - t0);
// for (int i=0;i<h_testing_results.Length();i++){
// printf("target\t%d\tpredicted\t%d\n", h_testing_classes[i], h_testing_results[i]);
// }
showClassificationMetrics(h_testing_classes, h_testing_results, testing_dataset_size);
//if requested save results to a file
if (classification_results_filename != NULL) {
if (DEBUG)
cout << "saving classification results to file " << classification_results_filename << endl;
char * WRITE_BUF = new char[BUFFERING_BLOCK_SIZE];
FILE *model_file;
model_file = fopen(classification_results_filename, "w");
if (model_file) {
//buffer process
setvbuf(model_file, WRITE_BUF, _IOFBF, BUFFERING_BLOCK_SIZE);
//give the amount of samples
fprintf(model_file, "#%d\n", testing_dataset_size);
//first line gives a comment
fprintf(model_file, "target,predicted\n");
for (int i = 0; i < testing_dataset_size; i++) {
fprintf(model_file, "%d,%d\n", h_testing_classes[i], h_testing_results[i]);
}
fclose(model_file);
} else
cout << "Err: Unable to open classification results file for write." << endl;
delete[] WRITE_BUF;
}
}
delete[] kernel_args;
if (DEBUG)
cout << "exiting..." << endl;
return 0;
}
} //namespace
int main(int argc, char **argv){
GPUMLib::main(argc, argv);
}
|
square.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 10
__global__ void square_array(int* a) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) a[idx] = a[idx] * a[idx];
}
int main(void) {
int host[N];
int* device;
int i;
dim3 grid, block;
size_t nbytes = N * sizeof(int);
grid.x = 1;
grid.y = 1;
grid.z = 1;
block.x = 1;
block.y = 1;
block.z = 1;
int nthreads = 4;
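// Ceiling division: use one extra block whenever N is not a multiple of nthreads.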
int nblocks = N/nthreads + !!(N % nthreads);
grid.x = nblocks;
block.x = nthreads;
hipMalloc(&device, nbytes);
for (i = 0; i != N; ++i)
host[i] = (int)i;
hipMemcpy(device, host, nbytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( square_array), dim3(grid), dim3(block), 0, 0, device);
hipMemcpy(host, device, nbytes, hipMemcpyDeviceToHost);
hipFree(device);
for (i = 0; i != N; ++i) {
printf("%d: %d\n", i, host[i]);
}
}
|
square.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#define N 10
__global__ void square_array(int* a) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) a[idx] = a[idx] * a[idx];
}
int main(void) {
int host[N];
int* device;
int i;
dim3 grid, block;
size_t nbytes = N * sizeof(int);
grid.x = 1;
grid.y = 1;
grid.z = 1;
block.x = 1;
block.y = 1;
block.z = 1;
int nthreads = 4;
int nblocks = N/nthreads + !!(N % nthreads);
grid.x = nblocks;
block.x = nthreads;
cudaMalloc(&device, nbytes);
for (i = 0; i != N; ++i)
host[i] = (int)i;
cudaMemcpy(device, host, nbytes, cudaMemcpyHostToDevice);
square_array<<<grid, block>>>(device);
cudaMemcpy(host, device, nbytes, cudaMemcpyDeviceToHost);
cudaFree(device);
for (i = 0; i != N; ++i) {
printf("%d: %d\n", i, host[i]);
}
}
|
b0d9fd83689814c769a967885b208dbe569dc50f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "SoftBodyMesh3D.h"
#include <vector>
#include <set>
#include <assert.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cinder/app/AppBase.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <cuMat/src/ConjugateGradient.h>
#include <cinder/Log.h>
#include "helper_matrixmath.h"
#include "CommonKernels.h"
#include "DebugUtils.h"
#include "CudaTimer.h"
#ifndef NDEBUG
#include <Eigen/Dense>
#endif
namespace ar3d {
void SoftBodyMesh3D::Input::assertSizes() const
{
assert(indices_.rows() > 0);
assert(indices_.rows() == numElements_);
assert(referencePositions_.rows() > 0);
assert(numTotalNodes_ == referencePositions_.rows());
assert(numFreeNodes_ <= numTotalNodes_);
assert(neumannForces_.rows() <= numFreeNodes_);
assert(numFreeNodes_ > 0);
}
SoftBodyMesh3D::Precomputed SoftBodyMesh3D::allocatePrecomputed(const Input& input)
{
input.assertSizes();
Precomputed p;
p.bodyForces_ = Vector3X(input.numFreeNodes_); p.bodyForces_.setZero();
p.lumpedMass_ = VectorX(input.numFreeNodes_); p.lumpedMass_.setZero();
return p;
}
SoftBodyMesh3D::State SoftBodyMesh3D::allocateState(const Input& input)
{
State s;
s.displacements_ = Vector3X(input.numFreeNodes_); s.displacements_.setZero();
s.velocities_ = Vector3X(input.numFreeNodes_); s.velocities_.setZero();
return s;
}
cuMat::SparsityPattern<cuMat::CSR> SoftBodyMesh3D::computeSparsityPattern(const std::vector<int4>& indices, int numFreeNodes)
{
typedef cuMat::SparsityPattern<cuMat::CSR> SPattern;
SPattern pattern;
pattern.rows = numFreeNodes;
pattern.cols = numFreeNodes;
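// Build a CSR sparsity pattern over the free nodes: any pair of free nodes sharing a tetrahedron
// contributes one (3x3 block) entry; JA holds the per-row offsets, IA the column indices.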
//create entry set
typedef std::pair<int, int> entry_t;
std::set<entry_t> entries;
for (const int4& e : indices)
{
const std::array<int, 4> ix = {e.x, e.y, e.z, e.w};
for (int i = 0; i < 4; ++i) {
const int nodeI = ix[i];
const bool dirichletI = nodeI >= numFreeNodes;
for (int j = 0; j < 4; ++j) {
const int nodeJ = ix[j];
const bool dirichletJ = nodeJ >= numFreeNodes;
if (!dirichletI && !dirichletJ) {
entries.insert(std::make_pair(nodeI, nodeJ));
}
}
}
}
SMatrix3x3::StorageIndex nnz = static_cast<SMatrix3x3::StorageIndex>(entries.size());
pattern.nnz = nnz;
//allocate indices on the host
std::vector<SMatrix3x3::StorageIndex> JA(pattern.rows + 1, 0); //outer
std::vector<SMatrix3x3::StorageIndex> IA; IA.reserve(nnz); //inner
//loop through all sorted entries and build indices
entry_t lastEntry(-1,-1);
for (const entry_t& e : entries)
{
//assert sorted
assert(lastEntry.first < e.first || (lastEntry.first==e.first && lastEntry.second<e.second));
lastEntry = e;
//increment outer index, add inner index
JA[lastEntry.first + 1]++;
IA.push_back(lastEntry.second);
}
assert(IA.size() == nnz);
for (int i=0; i<pattern.rows; ++i)
JA[i+1] += JA[i]; //prefix sum
//copy to device
pattern.JA = SPattern::IndexVector(pattern.rows + 1);
pattern.JA.copyFromHost(JA.data());
pattern.IA = SPattern::IndexVector(nnz);
pattern.IA.copyFromHost(IA.data());
CI_LOG_I("Sparsity pattern created, matrix size: " << pattern.rows << ", non-zeros: " << nnz
<< " (" << (100.0*nnz / pattern.rows / pattern.rows) << "%, avg " << real(nnz/pattern.rows) << " per row)");
pattern.assertValid();
return pattern;
}
SoftBodyMesh3D::Input SoftBodyMesh3D::createBar(const InputBarSettings& settings)
{
real3 min = settings.center - settings.halfsize;
real3 max = settings.center + settings.halfsize;
real3 minDirichlet = settings.centerDirichlet - settings.halfsizeDirichlet;
real3 maxDirichlet = settings.centerDirichlet + settings.halfsizeDirichlet;
real3 size = max - min;
int3 resolution = make_int3(
::max(1, static_cast<int>(round(size.x * settings.resolution))),
::max(1, static_cast<int>(round(size.y * settings.resolution))),
::max(1, static_cast<int>(round(size.z * settings.resolution))));
real3 invRes = make_real3(1.0/(resolution.x), 1.0/(resolution.y), 1.0/(resolution.z));
//compute counts
Input input;
input.numTotalNodes_ = (resolution.x+1)*(resolution.y+1)*(resolution.z+1);
input.numElements_ = 6 * resolution.x * resolution.y * resolution.z;
input.numFreeNodes_ = input.numTotalNodes_;
//index conversion + create vertices
std::vector<real3> vertices(input.numTotalNodes_);
std::vector<int> indexMap(input.numTotalNodes_);
#define IDX(ix, iy, iz) ((ix) + (resolution.x+1)*((iy) + (resolution.y+1)*(iz)))
int i=0;
for (int z=0; z<=resolution.z; ++z) for (int y=0; y<=resolution.y; ++y) for (int x=0; x<=resolution.x; ++x)
{
real3 pos = min + size * make_real3(x, y, z) * invRes;
if (settings.enableDirichlet &&
pos.x>=minDirichlet.x && pos.y>=minDirichlet.y && pos.z>=minDirichlet.z &&
pos.x<=maxDirichlet.x && pos.y<=maxDirichlet.y && pos.z<=maxDirichlet.z)
{
//dirichlet boundary (add them to the end)
input.numFreeNodes_--;
vertices[input.numFreeNodes_] = pos;
indexMap[IDX(x, y, z)] = input.numFreeNodes_;
} else
{
//free
vertices[i] = pos;
indexMap[IDX(x, y, z)] = i;
i++;
}
}
//create indices
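// Split every grid cell into 6 tetrahedra (matching numElements_ = 6 * nx * ny * nz above).
// Dirichlet nodes were appended after the free nodes, so an index >= numFreeNodes_ marks a fixed node.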
std::vector<int4> indices; indices.reserve(input.numElements_);
for (int z=0; z<resolution.z; ++z) for (int y=0; y<resolution.y; ++y) for (int x=0; x<resolution.x; ++x)
{
//cinder::app::console() << IDX(x, y, z) << " " << IDX(x + 1, y + 1, z + 1) << std::endl;
indices.push_back(make_int4(indexMap.at(IDX(x, y, z)), indexMap.at(IDX(x+1, y, z)), indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x+1, y, z)), indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x, y, z+1)), indexMap.at(IDX(x+1, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x, y, z+1)), indexMap.at(IDX(x+1, y, z+1)), indexMap.at(IDX(x, y+1, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x+1, y, z)), indexMap.at(IDX(x+1, y+1, z)), indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x+1, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x+1, y+1, z)), indexMap.at(IDX(x, y+1, z+1)), indexMap.at(IDX(x+1, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x+1, y+1, z)), indexMap.at(IDX(x, y+1, z+1)), indexMap.at(IDX(x+1, y, z+1)), indexMap.at(IDX(x+1, y+1, z+1))));
}
#undef IDX
//copy to the gpu
input.indices_ = Vector4Xi(input.numElements_); input.indices_.copyFromHost(indices.data());
input.referencePositions_ = Vector3X(input.numTotalNodes_); input.referencePositions_.copyFromHost(vertices.data());
input.neumannForces_ = Vector3X::Constant(input.numFreeNodes_, make_real3(0,0,0));
//compute sparsity pattern
input.sparsityPattern_ = computeSparsityPattern(indices, input.numFreeNodes_);
CUMAT_SAFE_CALL(hipDeviceSynchronize());
return input;
}
//---------------------------------------------
// The actual instances:
// They only store the settings for simple access
// No logic is implemented here
//---------------------------------------------
SoftBodyMesh3D::SoftBodyMesh3D(const Input& input)
: input_(input)
, precomputed_(allocatePrecomputed(input))
, state_(allocateState(input))
{
allocateTemporary(input_);
//fill statistics
statistics_.numElements = input_.numElements_;
statistics_.numFreeNodes = input_.numFreeNodes_;
statistics_.numFixedNodes = input_.numTotalNodes_ - input_.numFreeNodes_;
statistics_.avgEntriesPerRow = input_.sparsityPattern_.nnz / double(input_.sparsityPattern_.rows);
}
SoftBodyMesh3D::~SoftBodyMesh3D()
{
}
void SoftBodyMesh3D::reset()
{
state_.displacements_.setZero();
state_.velocities_.setZero();
resetTimings();
}
void SoftBodyMesh3D::solve(bool dynamic, BackgroundWorker2* worker)
{
resetTemporary();
CudaTimer timer;
//1. Forces
worker->setStatus("Mesh: compute forces");
if (isRecordTimings()) timer.start();
forces_.inplace() = precomputed_.bodyForces_;
if (settings_.enableCollision_)
{
applyCollisionForces(input_, settings_, state_, forces_);
}
if (isRecordTimings()) { timer.stop(); statistics_.collisionForcesTime.push_back(timer.duration()); }
if (worker->isInterrupted()) return;
//2. stiffness matrix
worker->setStatus("Mesh: compute stiffness matrix");
if (isRecordTimings()) timer.start();
computeStiffnessMatrix(input_, state_, settings_, stiffness_, forces_);
if (isRecordTimings()) { timer.stop(); statistics_.matrixAssembleTime.push_back(timer.duration()); }
if (worker->isInterrupted()) return;
//3. Solve
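// Dynamic case: build the Newmark system A*u = b from the stiffness matrix, lumped mass and the
// alpha/beta damping terms, warm-start CG with u + dt*v as the initial guess, then recover the velocity.
// Static case: solve K*u = f with CG starting from a zero displacement.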
if (dynamic)
{
worker->setStatus("Mesh: Newmark compute matrices");
CommonKernels::newmarkTimeIntegration(
stiffness_, forces_, precomputed_.lumpedMass_,
state_.displacements_, state_.velocities_,
settings_.dampingAlpha_, settings_.dampingBeta_, settings_.timestep_,
newmarkA_, newmarkB_, settings_.newmarkTheta_);
worker->setStatus("Mesh: CG solve");
Vector3X currentDisplacement = state_.displacements_ + make_real3(settings_.timestep_) * state_.velocities_; //initial guess
int iterations = settings_.solverIterations_;
real tolError = settings_.solverTolerance_;
if (isRecordTimings()) timer.start();
CommonKernels::solveCG(newmarkA_, newmarkB_, currentDisplacement, iterations, tolError);
if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); }
worker->setStatus("Mesh: Newmark compute velocity");
Vector3X currentVelocity(input_.numFreeNodes_);
CommonKernels::newmarkComputeVelocity(
state_.displacements_, state_.velocities_,
currentDisplacement, currentVelocity,
settings_.timestep_, settings_.newmarkTheta_);
state_.displacements_.inplace() = currentDisplacement;
state_.velocities_.inplace() = currentVelocity;
} else
{
worker->setStatus("Mesh: CG solve");
state_.displacements_.setZero();
int iterations = settings_.solverIterations_;
real tolError = settings_.solverTolerance_;
if (isRecordTimings()) timer.start();
CommonKernels::solveCG(stiffness_, forces_, state_.displacements_, iterations, tolError);
if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); }
#if 0
Eigen::MatrixXf eigenStiffness = DebugUtils::matrixToEigen(stiffness_);
Eigen::VectorXf eigenForce = DebugUtils::vectorToEigen(forces_);
cinder::app::console() << "Stiffness matrix:\n" << eigenStiffness << std::endl;
if (!eigenStiffness.isApprox(eigenStiffness.transpose())) cinder::app::console() << " Stiffness matrix is not symmetric!!" << std::endl;
cinder::app::console() << "Force vector:\n" << eigenForce.transpose() << std::endl;
cinder::app::console() << "Solution:\n" << DebugUtils::vectorToEigen(state_.displacements_).transpose() << std::endl;
Eigen::VectorXf eigenSolution = eigenStiffness.fullPivLu().solve(eigenForce);
cinder::app::console() << "Solution using Eigen:\n" << eigenSolution.transpose() << std::endl;
DebugUtils::eigenToVector(eigenSolution, state_.displacements_);
#endif
}
worker->setStatus("Mesh: done");
}
void SoftBodyMesh3D::updateSettings()
{
precomputed_.bodyForces_.setZero();
precomputed_.lumpedMass_.setZero();
computeMassMatrix(input_, settings_, precomputed_.lumpedMass_);
computeBodyForces(input_, settings_, precomputed_.bodyForces_);
CI_LOG_I("Settings updated, mass matrix and body forces recomputed");
}
void SoftBodyMesh3D::allocateTemporary(const Input& input)
{
forces_ = Vector3X(input.numFreeNodes_);
stiffness_ = SMatrix3x3(input.sparsityPattern_);
newmarkA_ = SMatrix3x3(input.sparsityPattern_);
newmarkB_ = Vector3X(input.numFreeNodes_);
}
void SoftBodyMesh3D::resetTemporary()
{
forces_.setZero();
stiffness_.setZero();
}
}
|
b0d9fd83689814c769a967885b208dbe569dc50f.cu
|
#include "SoftBodyMesh3D.h"
#include <vector>
#include <set>
#include <assert.h>
#include <algorithm>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <cinder/app/AppBase.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <cuMat/src/ConjugateGradient.h>
#include <cinder/Log.h>
#include "helper_matrixmath.h"
#include "CommonKernels.h"
#include "DebugUtils.h"
#include "CudaTimer.h"
#ifndef NDEBUG
#include <Eigen/Dense>
#endif
namespace ar3d {
void SoftBodyMesh3D::Input::assertSizes() const
{
assert(indices_.rows() > 0);
assert(indices_.rows() == numElements_);
assert(referencePositions_.rows() > 0);
assert(numTotalNodes_ == referencePositions_.rows());
assert(numFreeNodes_ <= numTotalNodes_);
assert(neumannForces_.rows() <= numFreeNodes_);
assert(numFreeNodes_ > 0);
}
SoftBodyMesh3D::Precomputed SoftBodyMesh3D::allocatePrecomputed(const Input& input)
{
input.assertSizes();
Precomputed p;
p.bodyForces_ = Vector3X(input.numFreeNodes_); p.bodyForces_.setZero();
p.lumpedMass_ = VectorX(input.numFreeNodes_); p.lumpedMass_.setZero();
return p;
}
SoftBodyMesh3D::State SoftBodyMesh3D::allocateState(const Input& input)
{
State s;
s.displacements_ = Vector3X(input.numFreeNodes_); s.displacements_.setZero();
s.velocities_ = Vector3X(input.numFreeNodes_); s.velocities_.setZero();
return s;
}
cuMat::SparsityPattern<cuMat::CSR> SoftBodyMesh3D::computeSparsityPattern(const std::vector<int4>& indices, int numFreeNodes)
{
typedef cuMat::SparsityPattern<cuMat::CSR> SPattern;
SPattern pattern;
pattern.rows = numFreeNodes;
pattern.cols = numFreeNodes;
//create entry set
typedef std::pair<int, int> entry_t;
std::set<entry_t> entries;
for (const int4& e : indices)
{
const std::array<int, 4> ix = {e.x, e.y, e.z, e.w};
for (int i = 0; i < 4; ++i) {
const int nodeI = ix[i];
const bool dirichletI = nodeI >= numFreeNodes;
for (int j = 0; j < 4; ++j) {
const int nodeJ = ix[j];
const bool dirichletJ = nodeJ >= numFreeNodes;
if (!dirichletI && !dirichletJ) {
entries.insert(std::make_pair(nodeI, nodeJ));
}
}
}
}
SMatrix3x3::StorageIndex nnz = static_cast<SMatrix3x3::StorageIndex>(entries.size());
pattern.nnz = nnz;
//allocate indices on the host
std::vector<SMatrix3x3::StorageIndex> JA(pattern.rows + 1, 0); //outer
std::vector<SMatrix3x3::StorageIndex> IA; IA.reserve(nnz); //inner
//loop through all sorted entries and build indices
entry_t lastEntry(-1,-1);
for (const entry_t& e : entries)
{
//assert sorted
assert(lastEntry.first < e.first || (lastEntry.first==e.first && lastEntry.second<e.second));
lastEntry = e;
//increment outer index, add inner index
JA[lastEntry.first + 1]++;
IA.push_back(lastEntry.second);
}
assert(IA.size() == nnz);
for (int i=0; i<pattern.rows; ++i)
JA[i+1] += JA[i]; //prefix sum
//copy to device
pattern.JA = SPattern::IndexVector(pattern.rows + 1);
pattern.JA.copyFromHost(JA.data());
pattern.IA = SPattern::IndexVector(nnz);
pattern.IA.copyFromHost(IA.data());
CI_LOG_I("Sparsity pattern created, matrix size: " << pattern.rows << ", non-zeros: " << nnz
<< " (" << (100.0*nnz / pattern.rows / pattern.rows) << "%, avg " << real(nnz/pattern.rows) << " per row)");
pattern.assertValid();
return pattern;
}
SoftBodyMesh3D::Input SoftBodyMesh3D::createBar(const InputBarSettings& settings)
{
real3 min = settings.center - settings.halfsize;
real3 max = settings.center + settings.halfsize;
real3 minDirichlet = settings.centerDirichlet - settings.halfsizeDirichlet;
real3 maxDirichlet = settings.centerDirichlet + settings.halfsizeDirichlet;
real3 size = max - min;
int3 resolution = make_int3(
std::max(1, static_cast<int>(round(size.x * settings.resolution))),
std::max(1, static_cast<int>(round(size.y * settings.resolution))),
std::max(1, static_cast<int>(round(size.z * settings.resolution))));
real3 invRes = make_real3(1.0/(resolution.x), 1.0/(resolution.y), 1.0/(resolution.z));
//compute counts
Input input;
input.numTotalNodes_ = (resolution.x+1)*(resolution.y+1)*(resolution.z+1);
input.numElements_ = 6 * resolution.x * resolution.y * resolution.z;
input.numFreeNodes_ = input.numTotalNodes_;
//index conversion + create vertices
std::vector<real3> vertices(input.numTotalNodes_);
std::vector<int> indexMap(input.numTotalNodes_);
#define IDX(ix, iy, iz) ((ix) + (resolution.x+1)*((iy) + (resolution.y+1)*(iz)))
int i=0;
for (int z=0; z<=resolution.z; ++z) for (int y=0; y<=resolution.y; ++y) for (int x=0; x<=resolution.x; ++x)
{
real3 pos = min + size * make_real3(x, y, z) * invRes;
if (settings.enableDirichlet &&
pos.x>=minDirichlet.x && pos.y>=minDirichlet.y && pos.z>=minDirichlet.z &&
pos.x<=maxDirichlet.x && pos.y<=maxDirichlet.y && pos.z<=maxDirichlet.z)
{
//dirichlet boundary (add them to the end)
input.numFreeNodes_--;
vertices[input.numFreeNodes_] = pos;
indexMap[IDX(x, y, z)] = input.numFreeNodes_;
} else
{
//free
vertices[i] = pos;
indexMap[IDX(x, y, z)] = i;
i++;
}
}
//create indices
std::vector<int4> indices; indices.reserve(input.numElements_);
for (int z=0; z<resolution.z; ++z) for (int y=0; y<resolution.y; ++y) for (int x=0; x<resolution.x; ++x)
{
//cinder::app::console() << IDX(x, y, z) << " " << IDX(x + 1, y + 1, z + 1) << std::endl;
indices.push_back(make_int4(indexMap.at(IDX(x, y, z)), indexMap.at(IDX(x+1, y, z)), indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x+1, y, z)), indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x, y, z+1)), indexMap.at(IDX(x+1, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x, y, z+1)), indexMap.at(IDX(x+1, y, z+1)), indexMap.at(IDX(x, y+1, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x+1, y, z)), indexMap.at(IDX(x+1, y+1, z)), indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x+1, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x, y+1, z)), indexMap.at(IDX(x+1, y+1, z)), indexMap.at(IDX(x, y+1, z+1)), indexMap.at(IDX(x+1, y, z+1))));
indices.push_back(make_int4(indexMap.at(IDX(x+1, y+1, z)), indexMap.at(IDX(x, y+1, z+1)), indexMap.at(IDX(x+1, y, z+1)), indexMap.at(IDX(x+1, y+1, z+1))));
}
#undef IDX
//copy to the gpu
input.indices_ = Vector4Xi(input.numElements_); input.indices_.copyFromHost(indices.data());
input.referencePositions_ = Vector3X(input.numTotalNodes_); input.referencePositions_.copyFromHost(vertices.data());
input.neumannForces_ = Vector3X::Constant(input.numFreeNodes_, make_real3(0,0,0));
//compute sparsity pattern
input.sparsityPattern_ = computeSparsityPattern(indices, input.numFreeNodes_);
CUMAT_SAFE_CALL(cudaDeviceSynchronize());
return input;
}
//---------------------------------------------
// The actual instances:
// They only store the settings for simple access
// No logic is implemented here
//---------------------------------------------
SoftBodyMesh3D::SoftBodyMesh3D(const Input& input)
: input_(input)
, precomputed_(allocatePrecomputed(input))
, state_(allocateState(input))
{
allocateTemporary(input_);
//fill statistics
statistics_.numElements = input_.numElements_;
statistics_.numFreeNodes = input_.numFreeNodes_;
statistics_.numFixedNodes = input_.numTotalNodes_ - input_.numFreeNodes_;
statistics_.avgEntriesPerRow = input_.sparsityPattern_.nnz / double(input_.sparsityPattern_.rows);
}
SoftBodyMesh3D::~SoftBodyMesh3D()
{
}
void SoftBodyMesh3D::reset()
{
state_.displacements_.setZero();
state_.velocities_.setZero();
resetTimings();
}
void SoftBodyMesh3D::solve(bool dynamic, BackgroundWorker2* worker)
{
resetTemporary();
CudaTimer timer;
//1. Forces
worker->setStatus("Mesh: compute forces");
if (isRecordTimings()) timer.start();
forces_.inplace() = precomputed_.bodyForces_;
if (settings_.enableCollision_)
{
applyCollisionForces(input_, settings_, state_, forces_);
}
if (isRecordTimings()) { timer.stop(); statistics_.collisionForcesTime.push_back(timer.duration()); }
if (worker->isInterrupted()) return;
//2. stiffness matrix
worker->setStatus("Mesh: compute stiffness matrix");
if (isRecordTimings()) timer.start();
computeStiffnessMatrix(input_, state_, settings_, stiffness_, forces_);
if (isRecordTimings()) { timer.stop(); statistics_.matrixAssembleTime.push_back(timer.duration()); }
if (worker->isInterrupted()) return;
//3. Solve
if (dynamic)
{
worker->setStatus("Mesh: Newmark compute matrices");
CommonKernels::newmarkTimeIntegration(
stiffness_, forces_, precomputed_.lumpedMass_,
state_.displacements_, state_.velocities_,
settings_.dampingAlpha_, settings_.dampingBeta_, settings_.timestep_,
newmarkA_, newmarkB_, settings_.newmarkTheta_);
worker->setStatus("Mesh: CG solve");
Vector3X currentDisplacement = state_.displacements_ + make_real3(settings_.timestep_) * state_.velocities_; //initial guess
int iterations = settings_.solverIterations_;
real tolError = settings_.solverTolerance_;
if (isRecordTimings()) timer.start();
CommonKernels::solveCG(newmarkA_, newmarkB_, currentDisplacement, iterations, tolError);
if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); }
worker->setStatus("Mesh: Newmark compute velocity");
Vector3X currentVelocity(input_.numFreeNodes_);
CommonKernels::newmarkComputeVelocity(
state_.displacements_, state_.velocities_,
currentDisplacement, currentVelocity,
settings_.timestep_, settings_.newmarkTheta_);
state_.displacements_.inplace() = currentDisplacement;
state_.velocities_.inplace() = currentVelocity;
} else
{
worker->setStatus("Mesh: CG solve");
state_.displacements_.setZero();
int iterations = settings_.solverIterations_;
real tolError = settings_.solverTolerance_;
if (isRecordTimings()) timer.start();
CommonKernels::solveCG(stiffness_, forces_, state_.displacements_, iterations, tolError);
if (isRecordTimings()) { timer.stop(); statistics_.cgTime.push_back(timer.duration()); statistics_.cgIterations.push_back(iterations); }
#if 0
Eigen::MatrixXf eigenStiffness = DebugUtils::matrixToEigen(stiffness_);
Eigen::VectorXf eigenForce = DebugUtils::vectorToEigen(forces_);
cinder::app::console() << "Stiffness matrix:\n" << eigenStiffness << std::endl;
if (!eigenStiffness.isApprox(eigenStiffness.transpose())) cinder::app::console() << " Stiffness matrix is not symmetric!!" << std::endl;
cinder::app::console() << "Force vector:\n" << eigenForce.transpose() << std::endl;
cinder::app::console() << "Solution:\n" << DebugUtils::vectorToEigen(state_.displacements_).transpose() << std::endl;
Eigen::VectorXf eigenSolution = eigenStiffness.fullPivLu().solve(eigenForce);
cinder::app::console() << "Solution using Eigen:\n" << eigenSolution.transpose() << std::endl;
DebugUtils::eigenToVector(eigenSolution, state_.displacements_);
#endif
}
worker->setStatus("Mesh: done");
}
void SoftBodyMesh3D::updateSettings()
{
precomputed_.bodyForces_.setZero();
precomputed_.lumpedMass_.setZero();
computeMassMatrix(input_, settings_, precomputed_.lumpedMass_);
computeBodyForces(input_, settings_, precomputed_.bodyForces_);
CI_LOG_I("Settings updated, mass matrix and body forces recomputed");
}
void SoftBodyMesh3D::allocateTemporary(const Input& input)
{
forces_ = Vector3X(input.numFreeNodes_);
stiffness_ = SMatrix3x3(input.sparsityPattern_);
newmarkA_ = SMatrix3x3(input.sparsityPattern_);
newmarkB_ = Vector3X(input.numFreeNodes_);
}
void SoftBodyMesh3D::resetTemporary()
{
forces_.setZero();
stiffness_.setZero();
}
}
|
d6207d64d3f36ce513f09074d1cd3cb1d4f518e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "getForces.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *g_pos = NULL;
hipMalloc(&g_pos, XSIZE*YSIZE);
float3 *force = NULL;
hipMalloc(&force, XSIZE*YSIZE);
int offset = 2;
int device_ct = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(getForces, dim3(gridBlock), dim3(threadBlock), 0, 0, g_pos, force, offset, device_ct);
hipDeviceSynchronize();
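// The next 10 launches warm up the device (context creation / launch overhead); only the
// following 1000 launches are timed with steady_clock.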
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(getForces, dim3(gridBlock), dim3(threadBlock), 0, 0, g_pos, force, offset, device_ct);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(getForces, dim3(gridBlock), dim3(threadBlock), 0, 0, g_pos, force, offset, device_ct);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d6207d64d3f36ce513f09074d1cd3cb1d4f518e1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "getForces.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *g_pos = NULL;
cudaMalloc(&g_pos, XSIZE*YSIZE);
float3 *force = NULL;
cudaMalloc(&force, XSIZE*YSIZE);
int offset = 2;
int device_ct = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
getForces<<<gridBlock,threadBlock>>>(g_pos,force,offset,device_ct);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
getForces<<<gridBlock,threadBlock>>>(g_pos,force,offset,device_ct);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
getForces<<<gridBlock,threadBlock>>>(g_pos,force,offset,device_ct);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
54595d5306fab39dc7b4601c5d52969da78c01b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38, here u can change it
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LogOpGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
}
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
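// Specifically, with FL(p_t) = -alpha*(1-p_t)^gamma*log(p_t) and power_prob_data = alpha*(1-p_t)^gamma,
// 'grad' below equals -p_t * dFL/dp_t; multiplying it by (p_t - 1) or p_j afterwards applies the
// softmax Jacobian, giving dFL/dx_j for the logits.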
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j, (here i,j are refered for derivative of softmax)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
} else {
// if i != j, (here i,j are refered for derivative of softmax)
bottom_diff[ind_j] = grad * prob_data[ind_j];
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermidiate
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
54595d5306fab39dc7b4601c5d52969da78c01b9.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38, here u can change it
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
LogOpGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
}
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j, (here i,j are refered for derivative of softmax)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
} else {
// if i != j, (here i,j are refered for derivative of softmax)
bottom_diff[ind_j] = grad * prob_data[ind_j];
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermidiate
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
8478e66eea8d9b721a727e34723078b83739c783.hip
|
// !!! This is a file automatically generated by hipify!!!
// Render many spotlights on an image, computing multiple results per thread
// in order to increase instruction-level parallelism.
// Example for video 3.5.
#include <cmath>
#include <cstdint>
#include <iostream>
// Standard CUDA API functions
#include <hip/hip_runtime_api.h>
#include "../utils.h"
struct light {
float x;
float y;
float radius;
float brightness;
};
struct lots_of_lights {
unsigned int count;
light lights[1024];
};
__constant__ lots_of_lights dev_lights;
__device__ float clamp(float value) { return value > 1.0f ? 1.0f : value; }
__device__ float light_brightness(float x, float y, unsigned int width,
unsigned int height, const light &light)
{
float norm_x = x / width;
float norm_y = y / height;
float dx = norm_x - light.x;
float dy = norm_y - light.y;
float distance_squared = dx * dx + dy * dy;
if (distance_squared > light.radius * light.radius) {
return 0;
}
float distance = sqrtf(distance_squared);
float scaled_distance = distance / light.radius;
if (scaled_distance > 0.8) {
return (1.0f - (scaled_distance - 0.8f) * 5.0f) * light.brightness;
} else {
return light.brightness;
}
}
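// Index into a pitched 2D allocation: consecutive rows are 'pitch' bytes apart, so advance
// y * pitch bytes before adding the x offset within the row.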
template <typename T>
__device__ T *pointer2d(T *base_pointer, int x, int y, size_t pitch)
{
return (T *)((char *)base_pointer + y * pitch) + x;
}
const int OUTPUTS_PER_THREAD = 2;
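// Each thread produces OUTPUTS_PER_THREAD vertically adjacent pixels, so main() divides the
// grid's y-dimension by BLOCK_DIM.y * OUTPUTS_PER_THREAD.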
__global__ void spotlights(const image source, image dest, unsigned int width,
unsigned int height, size_t pitch, float ambient)
{
for (int i = 0; i < OUTPUTS_PER_THREAD; i++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = OUTPUTS_PER_THREAD * blockIdx.y * blockDim.y + threadIdx.y +
i * blockDim.y;
if (x >= width || y >= height) return;
float brightness = ambient;
for (int i = 0; i < dev_lights.count; i++) {
brightness += light_brightness(x, y, width, height, dev_lights.lights[i]);
}
*pointer2d(dest.red, x, y, pitch) =
clamp(*pointer2d(source.red, x, y, pitch) * brightness);
*pointer2d(dest.green, x, y, pitch) =
clamp(*pointer2d(source.green, x, y, pitch) * brightness);
*pointer2d(dest.blue, x, y, pitch) =
clamp(*pointer2d(source.blue, x, y, pitch) * brightness);
}
}
int main(int argc, char **argv)
{
auto params = set_up_test_planar(argc, argv);
image input2d, output2d;
size_t byte_width = params.width * sizeof(float);
size_t pitch;
// Allocate 2D aligned image
cudaCheckError(
hipMallocPitch(&input2d.red, &pitch, byte_width, params.height));
// Copy from 1D to 2D image
cudaCheckError(hipMemcpy2D(input2d.red, pitch, params.input_image.red,
byte_width, byte_width, params.height,
hipMemcpyDeviceToDevice));
// Allocate and copy other channels
// Note: pitch will be the same for all of these allocations
cudaCheckError(
hipMallocPitch(&input2d.green, &pitch, byte_width, params.height));
cudaCheckError(
hipMallocPitch(&input2d.blue, &pitch, byte_width, params.height));
cudaCheckError(
hipMallocPitch(&output2d.red, &pitch, byte_width, params.height));
cudaCheckError(
hipMallocPitch(&output2d.green, &pitch, byte_width, params.height));
cudaCheckError(
hipMallocPitch(&output2d.blue, &pitch, byte_width, params.height));
cudaCheckError(hipMemcpy2D(input2d.green, pitch, params.input_image.green,
byte_width, byte_width, params.height,
hipMemcpyDeviceToDevice));
cudaCheckError(hipMemcpy2D(input2d.blue, pitch, params.input_image.blue,
byte_width, byte_width, params.height,
hipMemcpyDeviceToDevice));
lots_of_lights lights = {1024};
float spacing = 1.0f / 32.0f;
for (int x = 0; x < 32; x++) {
for (int y = 0; y < 32; y++) {
int index = y * 32 + x;
lights.lights[index] = {x * spacing, y * spacing, 0.05, 0.2};
}
}
cudaCheckError(
hipMemcpyToSymbol(dev_lights, &lights, sizeof(lots_of_lights)));
dim3 BLOCK_DIM(32, 16);
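// The grid's y dimension shrinks by OUTPUTS_PER_THREAD since each block of threads now covers BLOCK_DIM.y * OUTPUTS_PER_THREAD rows.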
dim3 grid_dim(
(params.width + BLOCK_DIM.x - 1) / BLOCK_DIM.x,
(params.height + BLOCK_DIM.y - 1) / (BLOCK_DIM.y * OUTPUTS_PER_THREAD));
{
KernelTimer t;
hipLaunchKernelGGL(( spotlights), dim3(grid_dim), dim3(BLOCK_DIM), 0, 0, input2d, output2d, params.width,
params.height, pitch, 0.0);
}
cudaCheckError(hipMemcpy2D(params.output_image.red, byte_width, output2d.red,
pitch, byte_width, params.height,
hipMemcpyDeviceToDevice));
cudaCheckError(hipMemcpy2D(params.output_image.green, byte_width,
output2d.green, pitch, byte_width, params.height,
hipMemcpyDeviceToDevice));
cudaCheckError(hipMemcpy2D(params.output_image.blue, byte_width,
output2d.blue, pitch, byte_width, params.height,
hipMemcpyDeviceToDevice));
free_image(input2d);
free_image(output2d);
finish_test_planar(params);
return 0;
}
|
8478e66eea8d9b721a727e34723078b83739c783.cu
|
// Render many spotlights on an image, computing multiple results per thread
// in order to increase instruction-level parallelism.
// Example for video 3.5.
#include <cmath>
#include <cstdint>
#include <iostream>
// Standard CUDA API functions
#include <cuda_runtime_api.h>
#include "../utils.h"
struct light {
float x;
float y;
float radius;
float brightness;
};
struct lots_of_lights {
unsigned int count;
light lights[1024];
};
__constant__ lots_of_lights dev_lights;
__device__ float clamp(float value) { return value > 1.0f ? 1.0f : value; }
__device__ float light_brightness(float x, float y, unsigned int width,
unsigned int height, const light &light)
{
float norm_x = x / width;
float norm_y = y / height;
float dx = norm_x - light.x;
float dy = norm_y - light.y;
float distance_squared = dx * dx + dy * dy;
if (distance_squared > light.radius * light.radius) {
return 0;
}
float distance = sqrtf(distance_squared);
float scaled_distance = distance / light.radius;
if (scaled_distance > 0.8) {
return (1.0f - (scaled_distance - 0.8f) * 5.0f) * light.brightness;
} else {
return light.brightness;
}
}
template <typename T>
__device__ T *pointer2d(T *base_pointer, int x, int y, size_t pitch)
{
return (T *)((char *)base_pointer + y * pitch) + x;
}
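// Each thread writes OUTPUTS_PER_THREAD vertically separated output pixels, trading grid size for more independent work (instruction-level parallelism) per thread.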
const int OUTPUTS_PER_THREAD = 2;
__global__ void spotlights(const image source, image dest, unsigned int width,
unsigned int height, size_t pitch, float ambient)
{
for (int i = 0; i < OUTPUTS_PER_THREAD; i++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = OUTPUTS_PER_THREAD * blockIdx.y * blockDim.y + threadIdx.y +
i * blockDim.y;
if (x >= width || y >= height) return;
float brightness = ambient;
for (int i = 0; i < dev_lights.count; i++) {
brightness += light_brightness(x, y, width, height, dev_lights.lights[i]);
}
*pointer2d(dest.red, x, y, pitch) =
clamp(*pointer2d(source.red, x, y, pitch) * brightness);
*pointer2d(dest.green, x, y, pitch) =
clamp(*pointer2d(source.green, x, y, pitch) * brightness);
*pointer2d(dest.blue, x, y, pitch) =
clamp(*pointer2d(source.blue, x, y, pitch) * brightness);
}
}
int main(int argc, char **argv)
{
auto params = set_up_test_planar(argc, argv);
image input2d, output2d;
size_t byte_width = params.width * sizeof(float);
size_t pitch;
// Allocate 2D aligned image
cudaCheckError(
cudaMallocPitch(&input2d.red, &pitch, byte_width, params.height));
// Copy from 1D to 2D image
cudaCheckError(cudaMemcpy2D(input2d.red, pitch, params.input_image.red,
byte_width, byte_width, params.height,
cudaMemcpyDeviceToDevice));
// Allocate and copy other channels
// Note: pitch will be the same for all of these allocations
cudaCheckError(
cudaMallocPitch(&input2d.green, &pitch, byte_width, params.height));
cudaCheckError(
cudaMallocPitch(&input2d.blue, &pitch, byte_width, params.height));
cudaCheckError(
cudaMallocPitch(&output2d.red, &pitch, byte_width, params.height));
cudaCheckError(
cudaMallocPitch(&output2d.green, &pitch, byte_width, params.height));
cudaCheckError(
cudaMallocPitch(&output2d.blue, &pitch, byte_width, params.height));
cudaCheckError(cudaMemcpy2D(input2d.green, pitch, params.input_image.green,
byte_width, byte_width, params.height,
cudaMemcpyDeviceToDevice));
cudaCheckError(cudaMemcpy2D(input2d.blue, pitch, params.input_image.blue,
byte_width, byte_width, params.height,
cudaMemcpyDeviceToDevice));
lots_of_lights lights = {1024};
float spacing = 1.0f / 32.0f;
for (int x = 0; x < 32; x++) {
for (int y = 0; y < 32; y++) {
int index = y * 32 + x;
lights.lights[index] = {x * spacing, y * spacing, 0.05, 0.2};
}
}
cudaCheckError(
cudaMemcpyToSymbol(dev_lights, &lights, sizeof(lots_of_lights)));
dim3 BLOCK_DIM(32, 16);
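// The grid's y dimension shrinks by OUTPUTS_PER_THREAD since each block of threads now covers BLOCK_DIM.y * OUTPUTS_PER_THREAD rows.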
dim3 grid_dim(
(params.width + BLOCK_DIM.x - 1) / BLOCK_DIM.x,
(params.height + BLOCK_DIM.y - 1) / (BLOCK_DIM.y * OUTPUTS_PER_THREAD));
{
KernelTimer t;
spotlights<<<grid_dim, BLOCK_DIM>>>(input2d, output2d, params.width,
params.height, pitch, 0.0);
}
cudaCheckError(cudaMemcpy2D(params.output_image.red, byte_width, output2d.red,
pitch, byte_width, params.height,
cudaMemcpyDeviceToDevice));
cudaCheckError(cudaMemcpy2D(params.output_image.green, byte_width,
output2d.green, pitch, byte_width, params.height,
cudaMemcpyDeviceToDevice));
cudaCheckError(cudaMemcpy2D(params.output_image.blue, byte_width,
output2d.blue, pitch, byte_width, params.height,
cudaMemcpyDeviceToDevice));
free_image(input2d);
free_image(output2d);
finish_test_planar(params);
return 0;
}
|
34527c60d931ed65d688b0147d15a2bb3f184ce8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
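// Fixed-point grayscale conversion: the integer weights 307, 604 and 113 (out of 1024) approximate the usual 0.299/0.587/0.114 luma coefficients without floating-point arithmetic.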
__global__ void grayscale( unsigned char * rgb, unsigned char * g, std::size_t cols, std::size_t rows ) {
auto i = blockIdx.x*blockDim.x+ threadIdx.x;
auto j = blockIdx.y*blockDim.y+ threadIdx.y;
if( i < cols && j < rows ) {
g[ j * cols + i ] = (
307 * rgb[ 3 * ( j * cols + i ) ]
+ 604 * rgb[ 3 * ( j * cols + i ) + 1 ]
+ 113 * rgb[ 3 * ( j * cols + i ) + 2 ]
) / 1024;
}
}
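// 3x3 Sobel filter: horizontal and vertical gradients are combined as sqrt(h*h + v*v), with the squared magnitude clamped so the result fits in an 8-bit pixel.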
__global__ void sobel(unsigned char * g_in, unsigned char * g, std::size_t cols, std::size_t rows ){
auto i = blockIdx.x*blockDim.x+ threadIdx.x;
auto j = blockIdx.y*blockDim.y+ threadIdx.y;
int h, v, res;
__syncthreads( );
if(i>0 && j>0 && i < cols-1 && j < rows-1){
// Horizontal
h = g_in[((j - 1) * cols + i - 1)] - g_in[((j - 1) * cols + i + 1)]
+ 2 * g_in[( j * cols + i - 1)] - 2 * g_in[( j * cols + i + 1)]
+ g_in[((j + 1) * cols + i - 1)] - g_in[((j + 1) * cols + i + 1)];
// Vertical
v = g_in[((j - 1) * cols + i - 1)] - g_in[((j + 1) * cols + i - 1)]
+ 2 * g_in[((j - 1) * cols + i )] - 2 * g_in[((j + 1) * cols + i )]
+ g_in[((j - 1) * cols + i + 1)] - g_in[((j + 1) * cols + i + 1)];
//h = h > 255 ? 255 : h;
//v = v > 255 ? 255 : v;
res = h*h + v*v;
res = res > 255*255 ? 255*255 : res;
g[(j * cols + i) ] = sqrt( (float) res);
}
}
int main()
{
cv::Mat m_in = cv::imread("../in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( rows * cols ); // grayscale output image.
std::vector< unsigned char > gsobel( rows * cols ); // sobel output image.
cv::Mat m_out( rows, cols, CV_8UC1, gsobel.data() );
unsigned char * rgb_d;
unsigned char * g_d;
unsigned char * gsobel_d;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMalloc(&rgb_d,3*rows*cols); // allocate the input image on the device.
hipMalloc(&g_d,rows*cols); // allocate the grayscale output image on the device.
hipMemcpy(rgb_d,rgb,3*rows*cols,hipMemcpyHostToDevice); // copy the input image to the device.
dim3 t( 32, 32 );
dim3 b( ( cols - 1) / t.x + 1 , ( rows - 1 ) / t.y + 1 );
hipLaunchKernelGGL(( grayscale), dim3(b), dim3(t) , 0, 0, rgb_d,g_d,cols,rows );
hipMemcpy(gsobel.data(),g_d,rows*cols, hipMemcpyDeviceToHost); // copy the grayscale image back to the host.
cv::imwrite( "../out.jpg", m_out );
cv::Mat m_in2 = cv::imread("../out.jpg", cv::IMREAD_UNCHANGED );
rgb = m_in2.data;
rows = m_in2.rows;
cols = m_in2.cols;
hipMemcpy(g_d,rgb,rows*cols,hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto err = hipGetLastError();
if( err != hipSuccess )
{
std::cerr << hipGetErrorString( err ) << std::endl;
}
hipMalloc(&gsobel_d, rows*cols);
hipLaunchKernelGGL(( sobel), dim3(b), dim3(t) , 0, 0, g_d,gsobel_d,cols,rows);
hipDeviceSynchronize();
err = hipGetLastError();
if( err != hipSuccess )
{
std::cerr << "Last Sync" << hipGetErrorString( err ) << std::endl;
}
hipMemcpy(gsobel.data(),gsobel_d,rows*cols, hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapseTime;
hipEventElapsedTime(&elapseTime,start,stop);
std::cout<<elapseTime<<"ms"<<std::endl;
cv::imwrite( "../out.jpg", m_out ); // save the image.
hipFree( rgb_d );
hipFree( g_d);
hipFree( gsobel_d);
return 0;
}
|
34527c60d931ed65d688b0147d15a2bb3f184ce8.cu
|
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
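// Fixed-point grayscale conversion: the integer weights 307, 604 and 113 (out of 1024) approximate the usual 0.299/0.587/0.114 luma coefficients without floating-point arithmetic.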
__global__ void grayscale( unsigned char * rgb, unsigned char * g, std::size_t cols, std::size_t rows ) {
auto i = blockIdx.x*blockDim.x+ threadIdx.x;
auto j = blockIdx.y*blockDim.y+ threadIdx.y;
if( i < cols && j < rows ) {
g[ j * cols + i ] = (
307 * rgb[ 3 * ( j * cols + i ) ]
+ 604 * rgb[ 3 * ( j * cols + i ) + 1 ]
+ 113 * rgb[ 3 * ( j * cols + i ) + 2 ]
) / 1024;
}
}
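// 3x3 Sobel filter: horizontal and vertical gradients are combined as sqrt(h*h + v*v), with the squared magnitude clamped so the result fits in an 8-bit pixel.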
__global__ void sobel(unsigned char * g_in, unsigned char * g, std::size_t cols, std::size_t rows ){
auto i = blockIdx.x*blockDim.x+ threadIdx.x;
auto j = blockIdx.y*blockDim.y+ threadIdx.y;
int h, v, res;
__syncthreads( );
if(i>0 && j>0 && i < cols-1 && j < rows-1){
// Horizontal
h = g_in[((j - 1) * cols + i - 1)] - g_in[((j - 1) * cols + i + 1)]
+ 2 * g_in[( j * cols + i - 1)] - 2 * g_in[( j * cols + i + 1)]
+ g_in[((j + 1) * cols + i - 1)] - g_in[((j + 1) * cols + i + 1)];
// Vertical
v = g_in[((j - 1) * cols + i - 1)] - g_in[((j + 1) * cols + i - 1)]
+ 2 * g_in[((j - 1) * cols + i )] - 2 * g_in[((j + 1) * cols + i )]
+ g_in[((j - 1) * cols + i + 1)] - g_in[((j + 1) * cols + i + 1)];
//h = h > 255 ? 255 : h;
//v = v > 255 ? 255 : v;
res = h*h + v*v;
res = res > 255*255 ? 255*255 : res;
g[(j * cols + i) ] = sqrt( (float) res);
}
}
int main()
{
cv::Mat m_in = cv::imread("../in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( rows * cols ); // grayscale output image.
std::vector< unsigned char > gsobel( rows * cols ); // sobel output image.
cv::Mat m_out( rows, cols, CV_8UC1, gsobel.data() );
unsigned char * rgb_d;
unsigned char * g_d;
unsigned char * gsobel_d;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMalloc(&rgb_d,3*rows*cols); // allocate the input image on the device.
cudaMalloc(&g_d,rows*cols); // allocate the grayscale output image on the device.
cudaMemcpy(rgb_d,rgb,3*rows*cols,cudaMemcpyHostToDevice); // copy the input image to the device.
dim3 t( 32, 32 );
dim3 b( ( cols - 1) / t.x + 1 , ( rows - 1 ) / t.y + 1 );
grayscale<<< b, t >>>( rgb_d,g_d,cols,rows );
cudaMemcpy(gsobel.data(),g_d,rows*cols, cudaMemcpyDeviceToHost); // copy the grayscale image back to the host.
cv::imwrite( "../out.jpg", m_out );
cv::Mat m_in2 = cv::imread("../out.jpg", cv::IMREAD_UNCHANGED );
rgb = m_in2.data;
rows = m_in2.rows;
cols = m_in2.cols;
cudaMemcpy(g_d,rgb,rows*cols,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto err = cudaGetLastError();
if( err != cudaSuccess )
{
std::cerr << cudaGetErrorString( err ) << std::endl;
}
cudaMalloc(&gsobel_d, rows*cols);
sobel<<< b, t >>>(g_d,gsobel_d,cols,rows);
cudaDeviceSynchronize();
err = cudaGetLastError();
if( err != cudaSuccess )
{
std::cerr << "Last Sync" << cudaGetErrorString( err ) << std::endl;
}
cudaMemcpy(gsobel.data(),gsobel_d,rows*cols, cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapseTime;
cudaEventElapsedTime(&elapseTime,start,stop);
std::cout<<elapseTime<<"ms"<<std::endl;
cv::imwrite( "../out.jpg", m_out ); // save the image.
cudaFree( rgb_d );
cudaFree( g_d);
cudaFree( gsobel_d);
return 0;
}
|
1873f6a7f28947ca3fa10e64f333179368fc8652.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <iostream>
#include <math.h>
#include "funct_bin/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "funct_bin/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RK_45(double state[], double step, int n, InitBody *bs, Joint *js,double Y[], int cut_off);
// Functions found in Functs.cu
void pend_init(InitBody *bs,int n,double mass, double length);
void horizontal_drop(double x[],int n);
void set_up(double A[], double B[], double C[], int n , double h);
//Main function
int main()
{
int n=0;
int cut_off = -1; // start nonzero so the halving result below is adopted on the first pass (previously read uninitialized)
std::ofstream timedata;
std::ofstream numbods;
numbods.open("numbods.mtx");
timedata.open("graph_gpucpu1.mtx");
for(int numa = 1 ; numa<4; numa++)
{
n=0;
while(n<80000)
{
if(n<500)
{
n+=10;
}
else if( n<2000)
{
n+=100;
}
else if(n< 10000)
{
n+= 1000;
}
else
{
n+=10000;
}
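// Halve n (rounding up) numa times; unless the halving bottoms out at 1, the result x is adopted below as the cut-off passed to RK_45.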
int x = n;
for(int c =0; c<numa; c++)
{
if(x==1)
{
cut_off=0;
}
else if( x%2==0)
{
x=x/2;
}
else
{
x++;
x=x/2;
}
}
std::cout<<x<<std::endl;
if(cut_off !=0)
{
cut_off =x;
}
//Variable Declarations
InitBody* bodies; //List of bodies used for initialization only
Joint* joints; //List of joints between bodies NOTE: This joint list is not used in this version
double *inits; //Initial conditions
double *Y; //Solution to each timestep
//std::ofstream myfile;
//std::ofstream myfile2;
//myfile2.open("Vals.mtx");
//myfile.open ("output.mtx");
//System Setup
bodies = new InitBody[n]; //List of initialization bodies is length n
joints = new Joint[n]; //List of joints is length n
inits = new double[2*n]; //Initial conditions are length 2*n
Y = new double[2*n]; //Timestep solution is length 2*n
pend_init(bodies,n,1.0,1.0); //Initialize mass, length, and inertia of all bodies
//Time Setup
double tstep= 0.001; //Length of a timestep [s]
double tfinal =0.005; //Final time [s]
int tlen = (int) floor(tfinal/tstep)+1; //Number of timesteps
//Matrix Output Setup
//int shape1[2] = { tlen , 2*n }, fortran_order = 0; //Shape of solution matrix
//int shape2[2] = { 2 , n+1 }; //Shape of matrix holding information to calculate the energy
//double Vals[2][n+1]; //Matrix holding information to calculate the energy
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::digits10);
//myfile2<<tstep<<" ";
//Vals[0][0]=tstep; //Save the length of a timestep for plotting purposes
//Vals[1][0]=tfinal; //Save the final time for plotting purposes
hipEvent_t beginEvent;
hipEvent_t endEvent;
hipEventCreate( &beginEvent );
hipEventCreate( &endEvent );
//System Initialization
horizontal_drop(inits,n); //Set the initial conditions
//Save the initial conditions in the solution matrix
//myfile << "\n";
hipEventRecord( beginEvent, 0 );
//Numerical Integration
for(int t=1; t<tlen; t++) //Loop through every timestep
{
RK_45(inits,tstep,n,bodies,joints,Y,cut_off); //Find the solution at that timestep
for(int i = 0; i<2*n;i++) //Loop through the solution
{
inits[i]=Y[i]; //Use the solution as the initial conditions for the next timestep
//myfile << inits[i]<<" ";
}
//myfile << "\n";
}
hipEventRecord( endEvent, 0 );
hipEventSynchronize( endEvent );
float timeValue;
hipEventElapsedTime( &timeValue, beginEvent, endEvent );
timedata<< timeValue << " ";
numbods<<n<<" ";
if ( hipSuccess != hipGetLastError() )
printf( "Error!\n" );
std::cout << n << std::endl;
//Solution Output
//npy_save_double("Vals.npy",fortran_order,2,shape2,&Vals[0][0]); //Output values to find energy
//Free memory
delete[] inits;
delete[] Y;
delete[] bodies;
delete[] joints;
//myfile.close();
//myfile2.close();
//std::cout<<n<<std::endl;
}
timedata<<"\n";
numbods<<"\n";
}
numbods.close();
timedata.close();
return EXIT_SUCCESS; //Program completed successfully
}
|
1873f6a7f28947ca3fa10e64f333179368fc8652.cu
|
//
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <iostream>
#include <math.h>
#include "funct_bin/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "funct_bin/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RK_45(double state[], double step, int n, InitBody *bs, Joint *js,double Y[], int cut_off);
// Functions found in Functs.cu
void pend_init(InitBody *bs,int n,double mass, double length);
void horizontal_drop(double x[],int n);
void set_up(double A[], double B[], double C[], int n , double h);
//Main function
int main()
{
int n=0;
int cut_off = -1; // start nonzero so the halving result below is adopted on the first pass (previously read uninitialized)
std::ofstream timedata;
std::ofstream numbods;
numbods.open("numbods.mtx");
timedata.open("graph_gpucpu1.mtx");
for(int numa = 1 ; numa<4; numa++)
{
n=0;
while(n<80000)
{
if(n<500)
{
n+=10;
}
else if( n<2000)
{
n+=100;
}
else if(n< 10000)
{
n+= 1000;
}
else
{
n+=10000;
}
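// Halve n (rounding up) numa times; unless the halving bottoms out at 1, the result x is adopted below as the cut-off passed to RK_45.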
int x = n;
for(int c =0; c<numa; c++)
{
if(x==1)
{
cut_off=0;
}
else if( x%2==0)
{
x=x/2;
}
else
{
x++;
x=x/2;
}
}
std::cout<<x<<std::endl;
if(cut_off !=0)
{
cut_off =x;
}
//Variable Declarations
InitBody* bodies; //List of bodies used for initialization only
Joint* joints; //List of joints between bodies NOTE: This joint list is not used in this version
double *inits; //Initial conditions
double *Y; //Solution to each timestep
//std::ofstream myfile;
//std::ofstream myfile2;
//myfile2.open("Vals.mtx");
//myfile.open ("output.mtx");
//System Setup
bodies = new InitBody[n]; //List of initialization bodies is length n
joints = new Joint[n]; //List of joints is length n
inits = new double[2*n]; //Initial conditions are length 2*n
Y = new double[2*n]; //Timestep solution is length 2*n
pend_init(bodies,n,1.0,1.0); //Initialize mass, length, and inertia of all bodies
//Time Setup
double tstep= 0.001; //Length of a timestep [s]
double tfinal =0.005; //Final time [s]
int tlen = (int) floor(tfinal/tstep)+1; //Number of timesteps
//Matrix Output Setup
//int shape1[2] = { tlen , 2*n }, fortran_order = 0; //Shape of solution matrix
//int shape2[2] = { 2 , n+1 }; //Shape of matrix holding information to calculate the energy
//double Vals[2][n+1]; //Matrix holding information to calculate the energy
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::digits10);
//myfile2<<tstep<<" ";
//Vals[0][0]=tstep; //Save the length of a timestep for plotting purposes
//Vals[1][0]=tfinal; //Save the final time for plotting purposes
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate( &beginEvent );
cudaEventCreate( &endEvent );
//System Initialization
horizontal_drop(inits,n); //Set the initial conditions
//Save the initial conditions in the solution matrix
//myfile << "\n";
cudaEventRecord( beginEvent, 0 );
//Numerical Integration
for(int t=1; t<tlen; t++) //Loop through every timestep
{
RK_45(inits,tstep,n,bodies,joints,Y,cut_off); //Find the solution at that timestep
for(int i = 0; i<2*n;i++) //Loop through the solution
{
inits[i]=Y[i]; //Use the solution as the initial conditions for the next timestep
//myfile << inits[i]<<" ";
}
//myfile << "\n";
}
cudaEventRecord( endEvent, 0 );
cudaEventSynchronize( endEvent );
float timeValue;
cudaEventElapsedTime( &timeValue, beginEvent, endEvent );
timedata<< timeValue << " ";
numbods<<n<<" ";
if ( cudaSuccess != cudaGetLastError() )
printf( "Error!\n" );
std::cout << n << std::endl;
//Solution Output
//npy_save_double("Vals.npy",fortran_order,2,shape2,&Vals[0][0]); //Output values to find energy
//Free memory
delete[] inits;
delete[] Y;
delete[] bodies;
delete[] joints;
//myfile.close();
//myfile2.close();
//std::cout<<n<<std::endl;
}
timedata<<"\n";
numbods<<"\n";
}
numbods.close();
timedata.close();
return EXIT_SUCCESS; //Program completed successfully
}
|
cb04c1181c6d842bc23a4b64dbe573bf4d5cb236.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fully Convolutional Instance-aware Semantic Segmentation
// Copyright (c) 2017 Microsoft
// Licensed under The MIT License [see LICENSE for details]
// Written by Haozhi Qi
// ------------------------------------------------------------------
#include "gpu_mv.hpp"
#include <iostream>
const int CAFFE_CUDA_NUM_THREADS = 512;
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(hipPeekAtLastError())
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
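// Grid-stride loop: each thread starts at its global index and strides by the total number of launched threads, so any grid size covers all n elements.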
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
__device__ float bilinear_interpolate(const float* bottom_data,
const int input_height, const int input_width,
float inverse_y, float inverse_x) {
// deal with cases that inverse elements are out of feature map boundary
if (inverse_y <= 0) inverse_y = 0;
if (inverse_x <= 0) inverse_x = 0;
int h_low = (int) inverse_y;
int w_low = (int) inverse_x;
int h_high;
int w_high;
// handle boundary case
if (h_low >= input_height - 1) {
h_high = h_low = input_height - 1;
inverse_y = (float) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= input_width - 1) {
w_high = w_low = input_width - 1;
inverse_x = (float) w_low;
} else {
w_high = w_low + 1;
}
float lh = inverse_y - h_low;
float lw = inverse_x - w_low;
float hh = 1 - lh, hw = 1 - lw;
// corner point of interpolation
float v1 = bottom_data[h_low * input_width + w_low];
float v2 = bottom_data[h_low * input_width + w_high];
float v3 = bottom_data[h_high * input_width + w_low];
float v4 = bottom_data[h_high * input_width + w_high];
// weight for each corner
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
// do bilinear interpolation
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__global__ void mask_render(const int nthreads, const float* input_box, const float* input_mask, const int box_dim, const int mask_size,
const int image_height, const int image_width, float* target_buffer) {
CUDA_KERNEL_LOOP(index, nthreads) {
// target buffer's size is (n * h * w)
int w = index % image_width;
int h = (index / image_width) % image_height;
int n = index / image_width / image_height;
// get the n-th boxes
const float* offset_box = input_box + n * box_dim;
const float* offset_mask = input_mask + n * mask_size * mask_size;
const float box_x1 = offset_box[0];
const float box_y1 = offset_box[1];
const float box_x2 = offset_box[2];
const float box_y2 = offset_box[3];
// check whether pixel is out of box bound
if (w < box_x1 || w > box_x2 || h < box_y1 || h > box_y2) {
target_buffer[index] = 0.0;
continue;
}
const float box_width = box_x2 - box_x1 + 1.0;
const float box_height = box_y2 - box_y1 + 1.0;
const float ratio_w = (float) mask_size / box_width;
const float ratio_h = (float) mask_size / box_height;
const float inverse_x = ((float) w - box_x1 + 0.5) * ratio_w - 0.5;
const float inverse_y = ((float) h - box_y1 + 0.5) * ratio_h - 0.5;
target_buffer[index] = bilinear_interpolate(offset_mask, mask_size, mask_size, inverse_y, inverse_x);
}
}
__global__ void mask_aggregate(const int nthreads, const float* render_mask, float* aggregate_mask, const int* candidate_inds, const int* candidate_starts, const float* candidate_weights, const int image_height, const int image_width, const float binary_thresh) {
// render_mask: num_boxes * image_height * image_width
// aggregate_mask: output_num * image_height * image_width
CUDA_KERNEL_LOOP(index, nthreads) {
int w = index % image_width;
int h = (index / image_width) % image_height;
int n = index / image_width / image_height;
// get candidate_inds, candidate_start
int candidate_start = (n == 0) ? 0 : candidate_starts[n-1];
int candidate_end = candidate_starts[n];
// output value will be summation of (mask * mask_weight)
float val = 0.0;
for (int i = candidate_start; i < candidate_end; ++i) {
int input_mask_ind = candidate_inds[i];
int offset_render_mask = (input_mask_ind * image_height + h) * image_width + w;
const float mask_val = render_mask[offset_render_mask] >= binary_thresh ? 1.f : 0.f;
val += (mask_val * candidate_weights[i]);
}
aggregate_mask[index] = val;
}
}
__global__ void reduce_mask_col(const int nthreads, const float* masks, int image_height, int image_width, const float binary_thresh, bool* output_buffer) {
// nthreads will be output_num * image_width
CUDA_KERNEL_LOOP(index, nthreads) {
int w = index % image_width;
int n = index / image_width;
output_buffer[index] = false;
for (int i = 0; i < image_height; ++i) {
if (masks[(n * image_height + i) * image_width + w] >= binary_thresh) {
output_buffer[index] = true;
break;
}
}
}
}
__global__ void reduce_mask_row(const int nthreads, const float* masks, int image_height, int image_width, const float binary_thresh, bool* output_buffer) {
// nthreads will be output_num * image_height
CUDA_KERNEL_LOOP(index, nthreads) {
int h = index % image_height;
int n = index / image_height;
output_buffer[index] = false;
for (int i = 0; i < image_width; ++i) {
if (masks[(n * image_height + h) * image_width + i] >= binary_thresh) {
output_buffer[index] = true;
break;
}
}
}
}
__global__ void reduce_bounding_x(const int nthreads, const bool* reduced_col, int* output_buffer, const int image_width) {
// nthreads will be output_num * 2
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % 2;
int n = index / 2;
output_buffer[index] = image_width / 2;
if (x == 0) {
for (int i = 0; i < image_width; ++i) {
if (reduced_col[n * image_width + i]) {
output_buffer[index] = i;
break;
}
}
} else {
for (int i = image_width - 1; i >= 0; --i) {
if (reduced_col[n * image_width + i]) {
output_buffer[index] = i;
break;
}
}
}
}
}
__global__ void reduce_bounding_y(const int nthreads, const bool* reduced_row, int* output_buffer, const int image_height) {
// nthreads will be output_num * 2
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % 2;
int n = index / 2;
output_buffer[index] = image_height / 2;
if (x == 0) {
for (int i = 0; i < image_height; ++i) {
if (reduced_row[n * image_height + i]) {
output_buffer[index] = i;
break;
}
}
} else {
for (int i = image_height - 1; i >= 0; --i) {
if (reduced_row[n * image_height + i]) {
output_buffer[index] = i;
break;
}
}
}
}
}
__global__ void mask_resize(const int nthreads, const float* original_mask, const int* bounding_x, const int* bounding_y, float* resized_mask, const int mask_size, const int image_height, const int image_width) {
// output size should be result_num * mask_size * mask_size
// original_mask should be result_num * image_height * image_width
// bounding_x should be result_num * 2
// bounding_y should be result_num * 2
CUDA_KERNEL_LOOP(index, nthreads) {
int w = index % mask_size;
int h = (index / mask_size) % mask_size;
int n = index / mask_size / mask_size;
int bbox_x1 = bounding_x[n * 2];
int bbox_x2 = bounding_x[n * 2 + 1];
int bbox_y1 = bounding_y[n * 2];
int bbox_y2 = bounding_y[n * 2 + 1];
float bbox_width = bbox_x2 - bbox_x1 + 1.0;
float bbox_height = bbox_y2 - bbox_y1 + 1.0;
float ratio_w = bbox_width / static_cast<float>(mask_size);
float ratio_h = bbox_height / static_cast<float>(mask_size);
float inverse_x = bbox_x1 + static_cast<float>(w + 0.5) * ratio_w - 0.5;
float inverse_y = bbox_y1 + static_cast<float>(h + 0.5) * ratio_h - 0.5;
const float* offset_mask = original_mask + n * image_height * image_width;
resized_mask[index] = bilinear_interpolate(offset_mask, image_height, image_width, inverse_y, inverse_x);
}
}
void _mv(const float* all_boxes, const float* all_masks, const int all_boxes_num, const int* candidate_inds, const int* candidate_start, const float* candidate_weights, const int candidate_num, const float binary_thresh, const int image_height, const int image_width, const int box_dim, const int mask_size, const int result_num, float* finalize_output_mask, int* finalize_output_box, const int device_id) {
// allocate device memory
float* dev_boxes = NULL;
float* dev_masks = NULL;
int* dev_candidate_inds = NULL;
float* dev_candidate_weights = NULL;
int* dev_candidate_start = NULL;
CUDA_CHECK(hipMalloc(&dev_boxes, all_boxes_num * box_dim * sizeof(float)));
CUDA_CHECK(hipMemcpy(dev_boxes, all_boxes, all_boxes_num * box_dim * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&dev_masks, all_boxes_num * mask_size * mask_size * sizeof(float)));
CUDA_CHECK(hipMemcpy(dev_masks, all_masks, all_boxes_num * mask_size * mask_size * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&dev_candidate_inds, candidate_num * sizeof(int)));
CUDA_CHECK(hipMemcpy(dev_candidate_inds, candidate_inds, candidate_num * sizeof(int),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&dev_candidate_weights, candidate_num * sizeof(float)));
CUDA_CHECK(hipMemcpy(dev_candidate_weights, candidate_weights, candidate_num * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&dev_candidate_start, result_num * sizeof(int)));
CUDA_CHECK(hipMemcpy(dev_candidate_start, candidate_start, result_num * sizeof(int),
hipMemcpyHostToDevice));
// 1. Masks are of size mask_size x mask_size, to do aggregation
// first resize them to image scale (image_height x image_width)
// result n x image_height x image_width buffer
const int render_mask_num = all_boxes_num * image_height * image_width;
float* dev_render_mask = NULL;
CUDA_CHECK(hipMalloc(&dev_render_mask, render_mask_num * sizeof(float)));
hipLaunchKernelGGL(( mask_render), dim3(CAFFE_GET_BLOCKS(render_mask_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, render_mask_num, dev_boxes, dev_masks, box_dim, mask_size, image_height, image_width, dev_render_mask);
CUDA_POST_KERNEL_CHECK;
// 2. After we get above buffer, we need to merge certain masks
// to get new masks according to candidate_weights and candidate_inds
// new_mask = \sum (old_mask * old_mask_weight)
const int output_mask_num = result_num * image_height * image_width;
float* dev_output_mask = NULL;
CUDA_CHECK(hipMalloc(&dev_output_mask, output_mask_num * sizeof(float)));
hipLaunchKernelGGL(( mask_aggregate), dim3(CAFFE_GET_BLOCKS(output_mask_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, output_mask_num, dev_render_mask, dev_output_mask, dev_candidate_inds, dev_candidate_start, dev_candidate_weights, image_height, image_width, binary_thresh);
CUDA_POST_KERNEL_CHECK;
// 3. After we get new masks buffer (result_num * image_height * image_width)
// we then find the mask boundary, this is achieved by two reduction operation
// then the tight mask boundary can be obtained
int reduced_col_num = result_num * image_width;
bool* reduced_col_buffer = NULL;
CUDA_CHECK(hipMalloc(&reduced_col_buffer, reduced_col_num * sizeof(bool)));
hipLaunchKernelGGL(( reduce_mask_col), dim3(CAFFE_GET_BLOCKS(reduced_col_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_col_num, dev_output_mask, image_height, image_width, binary_thresh, reduced_col_buffer);
int reduced_bound_x_num = result_num * 2;
int* reduced_bound_x = NULL;
CUDA_CHECK(hipMalloc(&reduced_bound_x, reduced_bound_x_num * sizeof(int)));
hipLaunchKernelGGL(( reduce_bounding_x), dim3(CAFFE_GET_BLOCKS(reduced_bound_x_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_bound_x_num, reduced_col_buffer, reduced_bound_x, image_width);
// find vertical boundary
int reduced_row_num = result_num * image_height;
bool* reduced_row_buffer = NULL;
CUDA_CHECK(hipMalloc(&reduced_row_buffer, reduced_row_num * sizeof(bool)));
hipLaunchKernelGGL(( reduce_mask_row), dim3(CAFFE_GET_BLOCKS(reduced_row_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_row_num, dev_output_mask, image_height, image_width, binary_thresh, reduced_row_buffer);
int reduced_bound_y_num = result_num * 2;
int* reduced_bound_y = NULL;
CUDA_CHECK(hipMalloc(&reduced_bound_y, reduced_bound_y_num * sizeof(int)));
hipLaunchKernelGGL(( reduce_bounding_y), dim3(CAFFE_GET_BLOCKS(reduced_bound_y_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_bound_y_num, reduced_row_buffer, reduced_bound_y, image_height);
// 4. Once we get tight mask boundary, we could use it to resize masks back
// to mask_size x mask_size
const int resized_mask_num = result_num * mask_size * mask_size;
float* resized_mask = NULL;
CUDA_CHECK(hipMalloc(&resized_mask, resized_mask_num * sizeof(float)));
hipLaunchKernelGGL(( mask_resize), dim3(CAFFE_GET_BLOCKS(resized_mask_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, resized_mask_num, dev_output_mask, reduced_bound_x, reduced_bound_y, resized_mask, mask_size, image_height, image_width);
// copy back boxes to cpu
int* cpu_bound_x = (int*) malloc(reduced_bound_x_num * sizeof(int));
hipMemcpy(cpu_bound_x, reduced_bound_x, reduced_bound_x_num * sizeof(int), hipMemcpyDeviceToHost);
int* cpu_bound_y = (int*) malloc(reduced_bound_y_num * sizeof(int));
hipMemcpy(cpu_bound_y, reduced_bound_y, reduced_bound_y_num * sizeof(int), hipMemcpyDeviceToHost);
int cnt = 0;
for (int i = 0; i < result_num; i ++) {
finalize_output_box[i*4] = cpu_bound_x[cnt];
finalize_output_box[i*4+1] = cpu_bound_y[cnt];
finalize_output_box[i*4+2] = cpu_bound_x[cnt+1];
finalize_output_box[i*4+3] = cpu_bound_y[cnt+1];
cnt += 2;
}
// copy back masks to cpu
CUDA_CHECK(hipMemcpy(finalize_output_mask, resized_mask, resized_mask_num * sizeof(float),
hipMemcpyDeviceToHost));
// free gpu memories
CUDA_CHECK(hipFree(dev_boxes));
CUDA_CHECK(hipFree(dev_masks));
CUDA_CHECK(hipFree(dev_candidate_inds));
CUDA_CHECK(hipFree(dev_candidate_start));
CUDA_CHECK(hipFree(dev_candidate_weights));
CUDA_CHECK(hipFree(dev_render_mask));
CUDA_CHECK(hipFree(resized_mask));
CUDA_CHECK(hipFree(dev_output_mask));
CUDA_CHECK(hipFree(reduced_col_buffer));
CUDA_CHECK(hipFree(reduced_bound_x));
CUDA_CHECK(hipFree(reduced_row_buffer));
CUDA_CHECK(hipFree(reduced_bound_y));
}
|
cb04c1181c6d842bc23a4b64dbe573bf4d5cb236.cu
|
// ------------------------------------------------------------------
// Fully Convolutional Instance-aware Semantic Segmentation
// Copyright (c) 2017 Microsoft
// Licensed under The MIT License [see LICENSE for details]
// Written by Haozhi Qi
// ------------------------------------------------------------------
#include "gpu_mv.hpp"
#include <iostream>
const int CAFFE_CUDA_NUM_THREADS = 512;
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError())
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
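// Grid-stride loop: each thread starts at its global index and strides by the total number of launched threads, so any grid size covers all n elements.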
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
__device__ float bilinear_interpolate(const float* bottom_data,
const int input_height, const int input_width,
float inverse_y, float inverse_x) {
// deal with cases that inverse elements are out of feature map boundary
if (inverse_y <= 0) inverse_y = 0;
if (inverse_x <= 0) inverse_x = 0;
int h_low = (int) inverse_y;
int w_low = (int) inverse_x;
int h_high;
int w_high;
// handle boundary case
if (h_low >= input_height - 1) {
h_high = h_low = input_height - 1;
inverse_y = (float) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= input_width - 1) {
w_high = w_low = input_width - 1;
inverse_x = (float) w_low;
} else {
w_high = w_low + 1;
}
float lh = inverse_y - h_low;
float lw = inverse_x - w_low;
float hh = 1 - lh, hw = 1 - lw;
// corner point of interpolation
float v1 = bottom_data[h_low * input_width + w_low];
float v2 = bottom_data[h_low * input_width + w_high];
float v3 = bottom_data[h_high * input_width + w_low];
float v4 = bottom_data[h_high * input_width + w_high];
// weight for each corner
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
// do bilinear interpolation
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__global__ void mask_render(const int nthreads, const float* input_box, const float* input_mask, const int box_dim, const int mask_size,
const int image_height, const int image_width, float* target_buffer) {
CUDA_KERNEL_LOOP(index, nthreads) {
// target buffer's size is (n * h * w)
int w = index % image_width;
int h = (index / image_width) % image_height;
int n = index / image_width / image_height;
// get the n-th boxes
const float* offset_box = input_box + n * box_dim;
const float* offset_mask = input_mask + n * mask_size * mask_size;
const float box_x1 = offset_box[0];
const float box_y1 = offset_box[1];
const float box_x2 = offset_box[2];
const float box_y2 = offset_box[3];
// check whether pixel is out of box bound
if (w < box_x1 || w > box_x2 || h < box_y1 || h > box_y2) {
target_buffer[index] = 0.0;
continue;
}
const float box_width = box_x2 - box_x1 + 1.0;
const float box_height = box_y2 - box_y1 + 1.0;
const float ratio_w = (float) mask_size / box_width;
const float ratio_h = (float) mask_size / box_height;
const float inverse_x = ((float) w - box_x1 + 0.5) * ratio_w - 0.5;
const float inverse_y = ((float) h - box_y1 + 0.5) * ratio_h - 0.5;
target_buffer[index] = bilinear_interpolate(offset_mask, mask_size, mask_size, inverse_y, inverse_x);
}
}
__global__ void mask_aggregate(const int nthreads, const float* render_mask, float* aggregate_mask, const int* candidate_inds, const int* candidate_starts, const float* candidate_weights, const int image_height, const int image_width, const float binary_thresh) {
// render_mask: num_boxes * image_height * image_width
// aggregate_mask: output_num * image_height * image_width
CUDA_KERNEL_LOOP(index, nthreads) {
int w = index % image_width;
int h = (index / image_width) % image_height;
int n = index / image_width / image_height;
// get candidate_inds, candidate_start
int candidate_start = (n == 0) ? 0 : candidate_starts[n-1];
int candidate_end = candidate_starts[n];
// output value will be summation of (mask * mask_weight)
float val = 0.0;
for (int i = candidate_start; i < candidate_end; ++i) {
int input_mask_ind = candidate_inds[i];
int offset_render_mask = (input_mask_ind * image_height + h) * image_width + w;
const float mask_val = render_mask[offset_render_mask] >= binary_thresh ? 1.f : 0.f;
val += (mask_val * candidate_weights[i]);
}
aggregate_mask[index] = val;
}
}
__global__ void reduce_mask_col(const int nthreads, const float* masks, int image_height, int image_width, const float binary_thresh, bool* output_buffer) {
// nthreads will be output_num * image_width
CUDA_KERNEL_LOOP(index, nthreads) {
int w = index % image_width;
int n = index / image_width;
output_buffer[index] = false;
for (int i = 0; i < image_height; ++i) {
if (masks[(n * image_height + i) * image_width + w] >= binary_thresh) {
output_buffer[index] = true;
break;
}
}
}
}
__global__ void reduce_mask_row(const int nthreads, const float* masks, int image_height, int image_width, const float binary_thresh, bool* output_buffer) {
// nthreads will be output_num * image_height
CUDA_KERNEL_LOOP(index, nthreads) {
int h = index % image_height;
int n = index / image_height;
output_buffer[index] = false;
for (int i = 0; i < image_width; ++i) {
if (masks[(n * image_height + h) * image_width + i] >= binary_thresh) {
output_buffer[index] = true;
break;
}
}
}
}
__global__ void reduce_bounding_x(const int nthreads, const bool* reduced_col, int* output_buffer, const int image_width) {
// nthreads will be output_num * 2
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % 2;
int n = index / 2;
output_buffer[index] = image_width / 2;
if (x == 0) {
for (int i = 0; i < image_width; ++i) {
if (reduced_col[n * image_width + i]) {
output_buffer[index] = i;
break;
}
}
} else {
for (int i = image_width - 1; i >= 0; --i) {
if (reduced_col[n * image_width + i]) {
output_buffer[index] = i;
break;
}
}
}
}
}
__global__ void reduce_bounding_y(const int nthreads, const bool* reduced_row, int* output_buffer, const int image_height) {
// nthreads will be output_num * 2
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % 2;
int n = index / 2;
output_buffer[index] = image_height / 2;
if (x == 0) {
for (int i = 0; i < image_height; ++i) {
if (reduced_row[n * image_height + i]) {
output_buffer[index] = i;
break;
}
}
} else {
for (int i = image_height - 1; i >= 0; --i) {
if (reduced_row[n * image_height + i]) {
output_buffer[index] = i;
break;
}
}
}
}
}
__global__ void mask_resize(const int nthreads, const float* original_mask, const int* bounding_x, const int* bounding_y, float* resized_mask, const int mask_size, const int image_height, const int image_width) {
// output size should be result_num * mask_size * mask_size
// original_mask should be result_num * image_height * image_width
// bounding_x should be result_num * 2
// bounding_y should be result_num * 2
CUDA_KERNEL_LOOP(index, nthreads) {
int w = index % mask_size;
int h = (index / mask_size) % mask_size;
int n = index / mask_size / mask_size;
int bbox_x1 = bounding_x[n * 2];
int bbox_x2 = bounding_x[n * 2 + 1];
int bbox_y1 = bounding_y[n * 2];
int bbox_y2 = bounding_y[n * 2 + 1];
float bbox_width = bbox_x2 - bbox_x1 + 1.0;
float bbox_height = bbox_y2 - bbox_y1 + 1.0;
float ratio_w = bbox_width / static_cast<float>(mask_size);
float ratio_h = bbox_height / static_cast<float>(mask_size);
float inverse_x = bbox_x1 + static_cast<float>(w + 0.5) * ratio_w - 0.5;
float inverse_y = bbox_y1 + static_cast<float>(h + 0.5) * ratio_h - 0.5;
const float* offset_mask = original_mask + n * image_height * image_width;
resized_mask[index] = bilinear_interpolate(offset_mask, image_height, image_width, inverse_y, inverse_x);
}
}
void _mv(const float* all_boxes, const float* all_masks, const int all_boxes_num, const int* candidate_inds, const int* candidate_start, const float* candidate_weights, const int candidate_num, const float binary_thresh, const int image_height, const int image_width, const int box_dim, const int mask_size, const int result_num, float* finalize_output_mask, int* finalize_output_box, const int device_id) {
// allocate device memory
float* dev_boxes = NULL;
float* dev_masks = NULL;
int* dev_candidate_inds = NULL;
float* dev_candidate_weights = NULL;
int* dev_candidate_start = NULL;
CUDA_CHECK(cudaMalloc(&dev_boxes, all_boxes_num * box_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(dev_boxes, all_boxes, all_boxes_num * box_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&dev_masks, all_boxes_num * mask_size * mask_size * sizeof(float)));
CUDA_CHECK(cudaMemcpy(dev_masks, all_masks, all_boxes_num * mask_size * mask_size * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&dev_candidate_inds, candidate_num * sizeof(int)));
CUDA_CHECK(cudaMemcpy(dev_candidate_inds, candidate_inds, candidate_num * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&dev_candidate_weights, candidate_num * sizeof(float)));
CUDA_CHECK(cudaMemcpy(dev_candidate_weights, candidate_weights, candidate_num * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&dev_candidate_start, result_num * sizeof(int)));
CUDA_CHECK(cudaMemcpy(dev_candidate_start, candidate_start, result_num * sizeof(int),
cudaMemcpyHostToDevice));
// 1. Masks are of size mask_size x mask_size, to do aggregation
// first resize them to image scale (image_height x image_width)
// result n x image_height x image_width buffer
const int render_mask_num = all_boxes_num * image_height * image_width;
float* dev_render_mask = NULL;
CUDA_CHECK(cudaMalloc(&dev_render_mask, render_mask_num * sizeof(float)));
mask_render<<<CAFFE_GET_BLOCKS(render_mask_num), CAFFE_CUDA_NUM_THREADS>>> (render_mask_num, dev_boxes, dev_masks, box_dim, mask_size, image_height, image_width, dev_render_mask);
CUDA_POST_KERNEL_CHECK;
// 2. After we get above buffer, we need to merge certain masks
// to get new masks according to candidate_weights and candidate_inds
// new_mask = \sum (old_mask * old_mask_weight)
const int output_mask_num = result_num * image_height * image_width;
float* dev_output_mask = NULL;
CUDA_CHECK(cudaMalloc(&dev_output_mask, output_mask_num * sizeof(float)));
mask_aggregate<<<CAFFE_GET_BLOCKS(output_mask_num), CAFFE_CUDA_NUM_THREADS>>> (output_mask_num, dev_render_mask, dev_output_mask, dev_candidate_inds, dev_candidate_start, dev_candidate_weights, image_height, image_width, binary_thresh);
CUDA_POST_KERNEL_CHECK;
// 3. After we get new masks buffer (result_num * image_height * image_width)
// we then find the mask boundary, this is achieved by two reduction operation
// then the tight mask boundary can be obtained
int reduced_col_num = result_num * image_width;
bool* reduced_col_buffer = NULL;
CUDA_CHECK(cudaMalloc(&reduced_col_buffer, reduced_col_num * sizeof(bool)));
reduce_mask_col<<<CAFFE_GET_BLOCKS(reduced_col_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_col_num, dev_output_mask, image_height, image_width, binary_thresh, reduced_col_buffer);
int reduced_bound_x_num = result_num * 2;
int* reduced_bound_x = NULL;
CUDA_CHECK(cudaMalloc(&reduced_bound_x, reduced_bound_x_num * sizeof(int)));
reduce_bounding_x<<<CAFFE_GET_BLOCKS(reduced_bound_x_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_bound_x_num, reduced_col_buffer, reduced_bound_x, image_width);
// find vertical boundary
int reduced_row_num = result_num * image_height;
bool* reduced_row_buffer = NULL;
CUDA_CHECK(cudaMalloc(&reduced_row_buffer, reduced_row_num * sizeof(bool)));
reduce_mask_row<<<CAFFE_GET_BLOCKS(reduced_row_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_row_num, dev_output_mask, image_height, image_width, binary_thresh, reduced_row_buffer);
int reduced_bound_y_num = result_num * 2;
int* reduced_bound_y = NULL;
CUDA_CHECK(cudaMalloc(&reduced_bound_y, reduced_bound_y_num * sizeof(int)));
reduce_bounding_y<<<CAFFE_GET_BLOCKS(reduced_bound_y_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_bound_y_num, reduced_row_buffer, reduced_bound_y, image_height);
// 4. Once we get tight mask boundary, we could use it to resize masks back
// to mask_size x mask_size
const int resized_mask_num = result_num * mask_size * mask_size;
float* resized_mask = NULL;
CUDA_CHECK(cudaMalloc(&resized_mask, resized_mask_num * sizeof(float)));
mask_resize<<<CAFFE_GET_BLOCKS(resized_mask_num), CAFFE_CUDA_NUM_THREADS>>> (resized_mask_num, dev_output_mask, reduced_bound_x, reduced_bound_y, resized_mask, mask_size, image_height, image_width);
// copy back boxes to cpu
int* cpu_bound_x = (int*) malloc(reduced_bound_x_num * sizeof(int));
cudaMemcpy(cpu_bound_x, reduced_bound_x, reduced_bound_x_num * sizeof(int), cudaMemcpyDeviceToHost);
int* cpu_bound_y = (int*) malloc(reduced_bound_y_num * sizeof(int));
cudaMemcpy(cpu_bound_y, reduced_bound_y, reduced_bound_y_num * sizeof(int), cudaMemcpyDeviceToHost);
int cnt = 0;
for (int i = 0; i < result_num; i ++) {
finalize_output_box[i*4] = cpu_bound_x[cnt];
finalize_output_box[i*4+1] = cpu_bound_y[cnt];
finalize_output_box[i*4+2] = cpu_bound_x[cnt+1];
finalize_output_box[i*4+3] = cpu_bound_y[cnt+1];
cnt += 2;
}
// copy back masks to cpu
CUDA_CHECK(cudaMemcpy(finalize_output_mask, resized_mask, resized_mask_num * sizeof(float),
cudaMemcpyDeviceToHost));
// free gpu memories
CUDA_CHECK(cudaFree(dev_boxes));
CUDA_CHECK(cudaFree(dev_masks));
CUDA_CHECK(cudaFree(dev_candidate_inds));
CUDA_CHECK(cudaFree(dev_candidate_start));
CUDA_CHECK(cudaFree(dev_candidate_weights));
CUDA_CHECK(cudaFree(dev_render_mask));
CUDA_CHECK(cudaFree(resized_mask));
CUDA_CHECK(cudaFree(dev_output_mask));
CUDA_CHECK(cudaFree(reduced_col_buffer));
CUDA_CHECK(cudaFree(reduced_bound_x));
CUDA_CHECK(cudaFree(reduced_row_buffer));
CUDA_CHECK(cudaFree(reduced_bound_y));
}
|
ae18adfaf2f9df8a77aa293b42f325eaf5848e87.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
void gpu_histogram(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
int i;
for ( i = 0; i < nbr_bin; i ++){
hist_out[i] = 0;
}
for ( i = 0; i < img_size; i ++){
hist_out[img_in[i]] ++;
}
}
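// Each thread equalizes a contiguous chunk of img_size/numOfThreads pixels: it maps every input value through the precomputed LUT and clamps the result to 255.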
__global__ void gpu_histogram_equalization(unsigned char * img_out, unsigned char * img_in,
int * hist_in, int img_size, int nbr_bin, int numOfThreads, int * lut){
int i = 0;
int x = threadIdx.x + blockDim.x*blockIdx.x;
int start;
int end;
//hist_in[x%256] = x;
/* Get the result image */
if(x >= img_size) {
return;
}
start = ((img_size/numOfThreads) * x);
if(numOfThreads == 1) {
end = (img_size/numOfThreads);
}
else {
end = ((img_size/numOfThreads) * (x+1));
}
for(i = start; i < end; i ++){
if(lut[img_in[i]] > 255){
img_out[i] = 255;
}
else{
img_out[i] = (unsigned char)lut[img_in[i]];
}
}
}
|
ae18adfaf2f9df8a77aa293b42f325eaf5848e87.cu
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
void gpu_histogram(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin){
int i;
for ( i = 0; i < nbr_bin; i ++){
hist_out[i] = 0;
}
for ( i = 0; i < img_size; i ++){
hist_out[img_in[i]] ++;
}
}
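// Each thread equalizes a contiguous chunk of img_size/numOfThreads pixels: it maps every input value through the precomputed LUT and clamps the result to 255.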
__global__ void gpu_histogram_equalization(unsigned char * img_out, unsigned char * img_in,
int * hist_in, int img_size, int nbr_bin, int numOfThreads, int * lut){
int i = 0;
int x = threadIdx.x + blockDim.x*blockIdx.x;
int start;
int end;
//hist_in[x%256] = x;
/* Get the result image */
if(x >= img_size) {
return;
}
start = ((img_size/numOfThreads) * x);
if(numOfThreads == 1) {
end = (img_size/numOfThreads);
}
else {
end = ((img_size/numOfThreads) * (x+1));
}
for(i = start; i < end; i ++){
if(lut[img_in[i]] > 255){
img_out[i] = 255;
}
else{
img_out[i] = (unsigned char)lut[img_in[i]];
}
}
}
|
d97be84c531ebf76809037d77b0200b2cd592c82.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <image.h>
// Lab color space
__constant__ __device__ float Lab_M[9];
__constant__ __device__ float Lab_Mi[9];
__constant__ __device__ float3 Lab_W;
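// Lab_e (~216/24389) and Lab_k (~24389/27) are the CIE epsilon/kappa thresholds of the piecewise f(t) function; Lab_v and Lab_vi are the sRGB linear-segment thresholds for encoding and decoding.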
#define Lab_e 0.008856f
#define Lab_k 903.3f
#define Lab_v 0.0031308f
#define Lab_vi 0.04045f
static void setup_cielab(hipStream_t stream)
{
static bool isInitialized = false;
if (isInitialized) return;
isInitialized = true;
int rc;
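// pW is the D65 reference white; pM maps linear sRGB to CIE XYZ and pMi is its inverse (XYZ back to linear sRGB).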
float pW[3] = { 0.95047f, 1.0f, 1.08883f };
float pM[9] = {
0.4124f, 0.3576f, 0.1805f,
0.2126f, 0.7152f, 0.0722f,
0.0193f, 0.1192f, 0.9504f,
};
float pMi[9] = {
3.2406f,-1.5372f,-0.4986f,
-0.9689f, 1.8758f, 0.0415f,
0.0557f, -0.2040f, 1.0571f,
};
rc = hipMemcpyToSymbolAsync(Lab_M, &pM, 9*sizeof(float), 0, hipMemcpyHostToDevice,stream);
if (hipSuccess != rc) throw "Unable to copy cielab chromacity matrix";
rc = hipMemcpyToSymbolAsync(Lab_Mi, &pMi, 9*sizeof(float), 0, hipMemcpyHostToDevice,stream);
if (hipSuccess != rc) throw "Unable to copy cielab inverted chromacity matrix";
rc = hipMemcpyToSymbolAsync(Lab_W, &pW, sizeof(float3), 0, hipMemcpyHostToDevice, stream);
if (hipSuccess != rc) throw "Unable to copy cielab reference white";
}
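// Converts 8-bit RGB pixels to CIE L*a*b*: undo the sRGB gamma, transform to XYZ, normalize by the reference white, apply the piecewise cube-root function, then form L, a and b.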
__global__
void f_ppm8_to_cielab(float3* out, size_t pitch_out, uchar3* in, size_t pitch_in, size_t width, size_t height)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= width || y >= height) return;
uchar3 p8 = ViewSingleSym<uchar3>(in, pitch_in, x, y, width, height);
float3 RGB = make_float3(p8.x, p8.y, p8.z) / 255.0f;
float3 rgb = make_float3(
RGB.x <= Lab_vi ? RGB.x / 12.92 : pow((RGB.x + 0.055)/1.055, 2.4),
RGB.y <= Lab_vi ? RGB.y / 12.92 : pow((RGB.y + 0.055)/1.055, 2.4),
RGB.z <= Lab_vi ? RGB.z / 12.92 : pow((RGB.z + 0.055)/1.055, 2.4)
);
float3 xyz = make_float3(
Lab_M[0] * rgb.x + Lab_M[1] * rgb.y + Lab_M[2] * rgb.z,
Lab_M[3] * rgb.x + Lab_M[4] * rgb.y + Lab_M[5] * rgb.z,
Lab_M[6] * rgb.x + Lab_M[7] * rgb.y + Lab_M[8] * rgb.z
);
float3 r = make_float3(
xyz.x / Lab_W.x,
xyz.y / Lab_W.y,
xyz.z / Lab_W.z
);
float3 f = make_float3(
r.x > Lab_e ? pow(r.x, 1.0f/3.0f) : (Lab_k * r.x + 16.0f) / 116.0f,
r.y > Lab_e ? pow(r.y, 1.0f/3.0f) : (Lab_k * r.y + 16.0f) / 116.0f,
r.z > Lab_e ? pow(r.z, 1.0f/3.0f) : (Lab_k * r.z + 16.0f) / 116.0f
);
float3 Lab = make_float3(
116.0f * f.y - 16.0f,
500.0f * (f.x - f.y),
200.0f * (f.y - f.z));
out[y * pitch_out / sizeof(float3) + x] = Lab;
//ViewSingleSym<float3>(out, pitch_out, x, y, width, height) = Lab;
}
__global__
void f_cielab_to_ppm8(uchar3* out, size_t pitch_out, float3* in, size_t pitch_in, size_t width, size_t height)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= width || y >= height) return;
float3 Lab = ViewSingleSym<float3>(in, pitch_in, x, y, width, height);
float3 f = make_float3(
(Lab.x+16.0f)/116.0f + Lab.y/500.0f,
(Lab.x+16.0f)/116.0f,
(Lab.x+16.0f)/116.0f - Lab.z/200.0f
);
float3 f3 = make_float3(
f.x * f.x * f.x,
f.y * f.y * f.y,
f.z * f.z * f.z
);
float3 r = make_float3(
f3.x > Lab_e ? f3.x : (116.0f * f.x - 16.0f)/Lab_k,
f3.y > Lab_e ? f3.y : (116.0f * f.y - 16.0f)/Lab_k,
f3.z > Lab_e ? f3.z : (116.0f * f.z - 16.0f)/Lab_k
//Lab.x > Lab_k*Lab_e ? f3.y : Lab.x/Lab_k
);
float3 xyz = make_float3(
r.x * Lab_W.x,
r.y * Lab_W.y,
r.z * Lab_W.z
);
float3 rgb = make_float3(
Lab_Mi[0] * xyz.x + Lab_Mi[1] * xyz.y + Lab_Mi[2] * xyz.z,
Lab_Mi[3] * xyz.x + Lab_Mi[4] * xyz.y + Lab_Mi[5] * xyz.z,
Lab_Mi[6] * xyz.x + Lab_Mi[7] * xyz.y + Lab_Mi[8] * xyz.z
);
float3 RGB = make_float3(
rgb.x <= Lab_v ? 12.92f * rgb.x : 1.055f * pow(rgb.x, 1.0f/2.4f) - 0.055,
rgb.y <= Lab_v ? 12.92f * rgb.y : 1.055f * pow(rgb.y, 1.0f/2.4f) - 0.055,
rgb.z <= Lab_v ? 12.92f * rgb.z : 1.055f * pow(rgb.z, 1.0f/2.4f) - 0.055
);
ViewSingleSym<uchar3>(out, pitch_out, x, y, width, height) =
make_uchar3(
clamp(RGB.x*255.0f, 0.0f, 255.0f),
clamp(RGB.y*255.0f, 0.0f, 255.0f),
clamp(RGB.z*255.0f, 0.0f, 255.0f)
);
}
static const char* strrstr(const char* c, const char* find)
{
if (!c || !find) return nullptr;
if (strlen(find) == 0) return c + strlen(c);
if (strlen(c) < strlen(find)) return nullptr;
for (int i=strlen(c)-strlen(find); i >= 0; i--)
{
if (!memcmp(c + i, find, strlen(find))) return c + i;
}
return nullptr;
}
Image::~Image()
{
if (_filename) free(_filename);
if (mem.host.data) hipHostFree(mem.host.data);
if (mem.device.data) hipFree(mem.device.data);
}
Image::Image()
{
_filename = nullptr;
mem.host.data = nullptr;
mem.device.data = nullptr;
filename = nullptr;
width = 0;
height = 0;
mem.host.pitch = 0;
mem.device.pitch = 0;
channels = 0;
bpp = 0;
type = Type::unknown;
}
void Image::copyToHost(hipStream_t stream)
{
int rc = hipMemcpy2DAsync(
mem.host.data, mem.host.pitch,
mem.device.data, mem.device.pitch,
width * (bpp >> 3) * channels,
height,
hipMemcpyDeviceToHost,
stream);
if (hipSuccess != rc) throw "Unable to copy from device to host";
}
void Image::copyToDevice(hipStream_t stream)
{
int rc = hipMemcpy2DAsync(
mem.device.data, mem.device.pitch,
mem.host.data, mem.host.pitch,
width * (bpp >> 3) * channels,
height,
hipMemcpyHostToDevice,
stream);
if (hipSuccess != rc) throw "Unable to copy from host to device";
}
void Image::toLab(Image* image, hipStream_t stream)
{
	if (!image) throw "Image is null";
	printInfo();
	image->printInfo();
if (image->width != width || image->height != height)
throw "Images are not the same size";
if (image->type != Type::lab)
throw "Destination image must be of type Lab";
if (type != Type::ppm || bpp != 8)
throw "Only works for ppm 8bpp at the moment";
dim3 blockSize = { 16, 16 };
dim3 gridSize = {
((int)width + blockSize.x - 1) / blockSize.x,
((int)height + blockSize.y - 1) / blockSize.y
};
setup_cielab(stream);
hipLaunchKernelGGL(( f_ppm8_to_cielab) , dim3(gridSize), dim3(blockSize), 0, stream ,
(float3*) image->mem.device.data, image->mem.device.pitch,
(uchar3*) this->mem.device.data, this->mem.device.pitch,
width, height);
}
void Image::fromLab(Image* image, hipStream_t stream)
{
if (!image) throw "Image is null";
if (image->width != width || image->height != height)
throw "Images are not the same size";
if (image->type != Type::lab)
throw "Destination image must be of type Lab";
if (type != Type::ppm || bpp != 8)
throw "Only works for ppm 8bpp at the moment";
dim3 blockSize = { 32, 32 };
dim3 gridSize = {
((int)width + blockSize.x - 1) / blockSize.x,
((int)height + blockSize.y - 1) / blockSize.y
};
setup_cielab(stream);
hipLaunchKernelGGL(( f_cielab_to_ppm8) , dim3(gridSize), dim3(blockSize), 0, stream ,
(uchar3*) this->mem.device.data, this->mem.device.pitch,
(float3*) image->mem.device.data, image->mem.device.pitch,
width, height);
}
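// A minimal usage sketch for the two conversions above (illustrative only; the file name,
// stream handling and the exact spelling of the Type enum are assumptions, not part of this file):
//
//   hipStream_t stream; hipStreamCreate(&stream);
//   Image* rgb = Image::load("input.ppm");                                  // 8bpp P6 PPM
//   Image* lab = Image::create(Image::Type::lab, rgb->width, rgb->height, 0, 32);
//   rgb->copyToDevice(stream);
//   rgb->toLab(lab, stream);        // RGB -> CIELAB on the device
//   rgb->fromLab(lab, stream);      // CIELAB -> RGB back into the PPM image
//   rgb->copyToHost(stream);
//   hipStreamSynchronize(stream);
//   delete lab; delete rgb; hipStreamDestroy(stream);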
void Image::printInfo()
{
printf("IMAGE %s\n", filename);
const char* typeName;
switch (type)
{
case Type::unknown: typeName = "unknown"; break;
case Type::jpeg: typeName = "JPEG"; break;
case Type::ppm: typeName = "PPM"; break;
case Type::pgm: typeName = "PGM"; break;
case Type::raw: typeName = "RAW"; break;
case Type::lab: typeName = "LAB"; break;
default: typeName = "INVALID!"; break;
}
printf("- TYPE: %s\n", typeName);
printf("- SIZE: %lu x %lu\n", width, height);
printf("- PITCH: %lu (dev), %lu (host)\n", mem.device.pitch, mem.host.pitch);
printf("- RANGE: 0x%lX\n", range);
printf("\n");
fflush(stdout);
}
Image* Image::create(Type type, size_t width, size_t height, size_t channels, size_t bpp)
{
int rc;
auto result = new Image();
result->width = width;
result->height = height;
result->type = type;
result->bpp = bpp;
if (!channels)
switch (type)
{
case Type::jpeg:
case Type::ppm:
result->channels=3;
break;
case Type::lab:
result->channels=3;
result->bpp = bpp =32;
break;
case Type::pgm:
result->channels=1;
break;
default: throw "Invalid image type";
}
else result->channels = channels;
result->range = (1ULL << result->bpp) - 1;
result->mem.host.pitch = width * (bpp >> 3) * result->channels;
rc = hipHostMalloc(&result->mem.host.data, result->mem.host.pitch * height);
if (hipSuccess != rc) throw "Unable to allocate host memory for image";
rc = hipMallocPitch(&result->mem.device.data, &result->mem.device.pitch,
width * (bpp >> 3) * result->channels, height);
if (hipSuccess != rc) throw "Unable to allocate device memory for image";
return result;
}
void Image::loadPPM()
{
int rc;
FILE* f = fopen(filename, "rb");
if (!f) throw "Unable to open file";
rc = fscanf(f, "P6 %lu %lu %lu \n", &width, &height, &range);
if (rc <= 0) throw "Unable to read PPM header";
type = Type::ppm;
channels = 3;
if (range < 256) bpp = 8;
else bpp = 16;
mem.host.pitch = width * (bpp >> 3) * channels;
rc = hipHostMalloc(&mem.host.data, mem.host.pitch * height);
if (hipSuccess != rc) throw "Unable to allocate host memory for image";
rc = hipMallocPitch(&mem.device.data, &mem.device.pitch, width * (bpp >> 3) * channels, height);
if (hipSuccess != rc) throw "Unable to allocate device memory for image";
rc = fread(mem.host.data, 1, mem.host.pitch * height, f);
if (rc <= 0) throw "Unable to read image data from PPM";
fclose(f);
}
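// loadPPM expects a binary ("P6") PPM, i.e. a header such as
//   P6
//   1920 1080
//   255
// followed by width*height*3 samples, one byte each when maxval < 256 and two bytes otherwise,
// which is exactly what the bpp selection above encodes.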
void Image::loadPGM()
{
}
void Image::loadJPG()
{
}
Image* Image::load(const char* filename)
{
auto result = new Image();
result->filename = result->_filename = strdup(filename);
	const char* extension = strrstr(filename, ".");
	if (!extension) throw "Unable to determine image type from file extension";
	if (!strcmp(extension, ".jpeg")) result->loadJPG();
	if (!strcmp(extension, ".jpg")) result->loadJPG();
	if (!strcmp(extension, ".ppm")) result->loadPPM();
	if (!strcmp(extension, ".pgm")) result->loadPGM();
return result;
}
float Image::psnr(const Image* ref)
{
float mse = 0;
for (size_t x=2; x<width-2; x++)
{
for (size_t y=2; y<height-2; y++)
{
for (size_t c=0; c<channels; c++)
{
void* p = ((uint8_t*)mem.host.data
+ y * mem.host.pitch
+ (x * channels + c) * (bpp>>3));
void* q = ((uint8_t*)ref->mem.host.data
+ y * ref->mem.host.pitch
+ (x * channels + c) * (bpp>>3));
float pv,qv;
switch (bpp)
{
case 8:
pv = (float)*(uint8_t*)p;
qv = (float)*(uint8_t*)q;
break;
case 16:
pv = (float)*(uint16_t*)p;
qv = (float)*(uint16_t*)q;
break;
default: throw "Unable to calculate PSNR due to unexpected bpp";
}
mse += (pv - qv) * (pv - qv);
}
}
}
mse /= width * height * channels;
return 20 * log10(range) - 10 * log10(mse);
}
JpegCodec::JpegCodec()
{
_width = 0;
_height = 0;
_channels = 0;
_buffer = nullptr;
_scanlines = nullptr;
_dinfo.err = jpeg_std_error(&_djerr);
_cinfo.err = jpeg_std_error(&_cjerr);
}
JpegCodec::~JpegCodec()
{
free(_buffer);
free(_scanlines);
}
void JpegCodec::prepare(int width, int height, int channels, int quality)
{
if (channels != 3) throw "Not implemented channels != 3";
_width = width;
_height = height;
_channels = channels;
_buffer = (uint8_t*) malloc(_width * _height * _channels);
if (!_buffer) throw "Unable to allocate intermediate buffer";
_scanlines = (JSAMPARRAY) malloc( sizeof(JSAMPROW) * height);
if (!_scanlines)
{
free(_buffer);
throw "Unable to allocate scanlines structure";
}
for (size_t i=0; i<_height; i++)
{
_scanlines[i] = (JSAMPROW) (_buffer + i * _width * _channels);
}
jpeg_create_decompress(&_dinfo);
jpeg_create_compress(&_cinfo);
_cinfo.image_width = _width;
_cinfo.image_height = height;
_cinfo.input_components = 3;
_cinfo.in_color_space = JCS_RGB;
jpeg_set_defaults(&_cinfo);
jpeg_set_quality(&_cinfo, quality, 1);
}
void JpegCodec::unprepare()
{
jpeg_destroy_decompress(&_dinfo);
jpeg_destroy_compress(&_cinfo);
}
void JpegCodec::encodeCPU(void* dst, size_t *size)
{
//hipMemcpyAsync(_buffer, src, _width * _height * _channels, hipMemcpyDeviceToHost, stream);
//hipStreamSynchronize(stream);
jpeg_mem_dest(&_cinfo, (uint8_t**)&dst, size);
jpeg_start_compress(&_cinfo, 1);
while (_cinfo.next_scanline < _cinfo.image_height)
{
jpeg_write_scanlines(&_cinfo, _scanlines + _cinfo.next_scanline, _cinfo.image_height - _cinfo.next_scanline);
}
jpeg_finish_compress(&_cinfo);
}
void JpegCodec::decodeToDeviceMemoryCPU(void* dst, const void* src, size_t size, hipStream_t stream)
{
jpeg_mem_src(&_dinfo, (uint8_t*)src, size);
jpeg_read_header(&_dinfo, 1);
jpeg_calc_output_dimensions(&_dinfo);
if (_dinfo.output_width != _width
|| _dinfo.output_height != _height
|| _dinfo.output_components != (int) _channels)
{
jpeg_abort_decompress(&_dinfo);
throw "Invalid image format";
}
jpeg_start_decompress(&_dinfo);
while (_dinfo.output_scanline < _dinfo.output_height)
{
jpeg_read_scanlines(&_dinfo, _scanlines + _dinfo.output_scanline,_dinfo.output_height - _dinfo.output_scanline);
}
jpeg_finish_decompress(&_dinfo);
hipMemcpyAsync(dst, _buffer, _width * _height * _channels, hipMemcpyHostToDevice, stream);
}
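// A rough usage sketch for the CPU decode path (hypothetical calling code; the buffer names,
// sizes and stream are assumptions):
//
//   JpegCodec codec;
//   codec.prepare(width, height, 3, 90);                                 // allocates _buffer/_scanlines
//   codec.decodeToDeviceMemoryCPU(devPtr, jpegBytes, jpegSize, stream);  // libjpeg decode + async H2D copy
//   hipStreamSynchronize(stream);   // _buffer is reused, so sync before the next decode
//   codec.unprepare();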
#if USE_NVJPEG
void JpegCodec::decodeToDeviceMemoryGPU(void* dst, const void* src, size_t size, hipStream_t stream)
{
int rc;
nvjpegHandle_t handle;
rc = nvjpegCreateEx(NVJPEG_BACKEND_DEFAULT, NULL, NULL, 0, &handle);
if (hipSuccess != rc) throw "Unable to create nvjpeg handle";
int channels;
int widths[NVJPEG_MAX_COMPONENT];
int heights[NVJPEG_MAX_COMPONENT];
nvjpegChromaSubsampling_t subsampling;
nvjpegJpegState_t state;
nvjpegOutputFormat_t fmt = NVJPEG_OUTPUT_RGBI;
nvjpegJpegStateCreate(handle, &state);
nvjpegGetImageInfo(handle, (uint8_t*) src, size, &channels, &subsampling, widths, heights);
if (widths[0] != (int)_width
|| heights[0] != (int)_height)
{
nvjpegJpegStateDestroy(state);
nvjpegDestroy(handle);
throw "Invalid image format";
}
nvjpegImage_t output;
output.channel[0] = (uint8_t*) dst;
output.pitch[0] = widths[0] * _channels;
nvjpegDecode(handle, state, (uint8_t*)src, size, fmt, &output, stream);
nvjpegJpegStateDestroy(state);
nvjpegDestroy(handle);
}
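// Note: this path creates and destroys the nvjpeg handle and state on every call, which is simple
// but relatively expensive. If throughput matters, one option (an assumption about the surrounding
// class, not something done here) would be to create them once in prepare() and tear them down in
// unprepare(), e.g. as members:
//   nvjpegHandle_t    _nvHandle;   // created once in prepare()
//   nvjpegJpegState_t _nvState;    // destroyed in unprepare()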
#endif
|
d97be84c531ebf76809037d77b0200b2cd592c82.cu
|
#include <image.h>
// Lab color space
__constant__ __device__ float Lab_M[9];
__constant__ __device__ float Lab_Mi[9];
__constant__ __device__ float3 Lab_W;
#define Lab_e 0.008856f
#define Lab_k 903.3f
#define Lab_v 0.0031308f
#define Lab_vi 0.04045f
static void setup_cielab(cudaStream_t stream)
{
static bool isInitialized = false;
if (isInitialized) return;
isInitialized = true;
int rc;
float pW[3] = { 0.95047f, 1.0f, 1.08883f };
float pM[9] = {
0.4124f, 0.3576f, 0.1805f,
0.2126f, 0.7152f, 0.0722f,
0.0193f, 0.1192f, 0.9504f,
};
float pMi[9] = {
3.2406f,-1.5372f,-0.4986f,
-0.9689f, 1.8758f, 0.0415f,
0.0557f, -0.2040, 1.0571f,
};
rc = cudaMemcpyToSymbolAsync(Lab_M, &pM, 9*sizeof(float), 0, cudaMemcpyHostToDevice,stream);
if (cudaSuccess != rc) throw "Unable to copy cielab chromacity matrix";
rc = cudaMemcpyToSymbolAsync(Lab_Mi, &pMi, 9*sizeof(float), 0, cudaMemcpyHostToDevice,stream);
if (cudaSuccess != rc) throw "Unable to copy cielab inverted chromacity matrix";
rc = cudaMemcpyToSymbolAsync(Lab_W, &pW, sizeof(float3), 0, cudaMemcpyHostToDevice, stream);
if (cudaSuccess != rc) throw "Unable to copy cielab reference white";
}
__global__
void f_ppm8_to_cielab(float3* out, size_t pitch_out, uchar3* in, size_t pitch_in, size_t width, size_t height)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= width || y >= height) return;
uchar3 p8 = ViewSingleSym<uchar3>(in, pitch_in, x, y, width, height);
float3 RGB = make_float3(p8.x, p8.y, p8.z) / 255.0f;
float3 rgb = make_float3(
RGB.x <= Lab_vi ? RGB.x / 12.92 : pow((RGB.x + 0.055)/1.055, 2.4),
RGB.y <= Lab_vi ? RGB.y / 12.92 : pow((RGB.y + 0.055)/1.055, 2.4),
RGB.z <= Lab_vi ? RGB.z / 12.92 : pow((RGB.z + 0.055)/1.055, 2.4)
);
float3 xyz = make_float3(
Lab_M[0] * rgb.x + Lab_M[1] * rgb.y + Lab_M[2] * rgb.z,
Lab_M[3] * rgb.x + Lab_M[4] * rgb.y + Lab_M[5] * rgb.z,
Lab_M[6] * rgb.x + Lab_M[7] * rgb.y + Lab_M[8] * rgb.z
);
float3 r = make_float3(
xyz.x / Lab_W.x,
xyz.y / Lab_W.y,
xyz.z / Lab_W.z
);
float3 f = make_float3(
r.x > Lab_e ? pow(r.x, 1.0f/3.0f) : (Lab_k * r.x + 16.0f) / 116.0f,
r.y > Lab_e ? pow(r.y, 1.0f/3.0f) : (Lab_k * r.y + 16.0f) / 116.0f,
r.z > Lab_e ? pow(r.z, 1.0f/3.0f) : (Lab_k * r.z + 16.0f) / 116.0f
);
float3 Lab = make_float3(
116.0f * f.y - 16.0f,
500.0f * (f.x - f.y),
200.0f * (f.y - f.z));
out[y * pitch_out / sizeof(float3) + x] = Lab;
//ViewSingleSym<float3>(out, pitch_out, x, y, width, height) = Lab;
}
__global__
void f_cielab_to_ppm8(uchar3* out, size_t pitch_out, float3* in, size_t pitch_in, size_t width, size_t height)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= width || y >= height) return;
float3 Lab = ViewSingleSym<float3>(in, pitch_in, x, y, width, height);
float3 f = make_float3(
(Lab.x+16.0f)/116.0f + Lab.y/500.0f,
(Lab.x+16.0f)/116.0f,
(Lab.x+16.0f)/116.0f - Lab.z/200.0f
);
float3 f3 = make_float3(
f.x * f.x * f.x,
f.y * f.y * f.y,
f.z * f.z * f.z
);
float3 r = make_float3(
f3.x > Lab_e ? f3.x : (116.0f * f.x - 16.0f)/Lab_k,
f3.y > Lab_e ? f3.y : (116.0f * f.y - 16.0f)/Lab_k,
f3.z > Lab_e ? f3.z : (116.0f * f.z - 16.0f)/Lab_k
//Lab.x > Lab_k*Lab_e ? f3.y : Lab.x/Lab_k
);
float3 xyz = make_float3(
r.x * Lab_W.x,
r.y * Lab_W.y,
r.z * Lab_W.z
);
float3 rgb = make_float3(
Lab_Mi[0] * xyz.x + Lab_Mi[1] * xyz.y + Lab_Mi[2] * xyz.z,
Lab_Mi[3] * xyz.x + Lab_Mi[4] * xyz.y + Lab_Mi[5] * xyz.z,
Lab_Mi[6] * xyz.x + Lab_Mi[7] * xyz.y + Lab_Mi[8] * xyz.z
);
float3 RGB = make_float3(
rgb.x <= Lab_v ? 12.92f * rgb.x : 1.055f * pow(rgb.x, 1.0f/2.4f) - 0.055,
rgb.y <= Lab_v ? 12.92f * rgb.y : 1.055f * pow(rgb.y, 1.0f/2.4f) - 0.055,
rgb.z <= Lab_v ? 12.92f * rgb.z : 1.055f * pow(rgb.z, 1.0f/2.4f) - 0.055
);
ViewSingleSym<uchar3>(out, pitch_out, x, y, width, height) =
make_uchar3(
clamp(RGB.x*255.0f, 0.0f, 255.0f),
clamp(RGB.y*255.0f, 0.0f, 255.0f),
clamp(RGB.z*255.0f, 0.0f, 255.0f)
);
}
static const char* strrstr(const char* c, const char* find)
{
if (!c || !find) return nullptr;
if (strlen(find) == 0) return c + strlen(c);
if (strlen(c) < strlen(find)) return nullptr;
for (int i=strlen(c)-strlen(find); i >= 0; i--)
{
if (!memcmp(c + i, find, strlen(find))) return c + i;
}
return nullptr;
}
Image::~Image()
{
if (_filename) free(_filename);
if (mem.host.data) cudaFreeHost(mem.host.data);
if (mem.device.data) cudaFree(mem.device.data);
}
Image::Image()
{
_filename = nullptr;
mem.host.data = nullptr;
mem.device.data = nullptr;
filename = nullptr;
width = 0;
height = 0;
mem.host.pitch = 0;
mem.device.pitch = 0;
channels = 0;
bpp = 0;
type = Type::unknown;
}
void Image::copyToHost(cudaStream_t stream)
{
int rc = cudaMemcpy2DAsync(
mem.host.data, mem.host.pitch,
mem.device.data, mem.device.pitch,
width * (bpp >> 3) * channels,
height,
cudaMemcpyDeviceToHost,
stream);
if (cudaSuccess != rc) throw "Unable to copy from device to host";
}
void Image::copyToDevice(cudaStream_t stream)
{
int rc = cudaMemcpy2DAsync(
mem.device.data, mem.device.pitch,
mem.host.data, mem.host.pitch,
width * (bpp >> 3) * channels,
height,
cudaMemcpyHostToDevice,
stream);
if (cudaSuccess != rc) throw "Unable to copy from host to device";
}
void Image::toLab(Image* image, cudaStream_t stream)
{
	if (!image) throw "Image is null";
	printInfo();
	image->printInfo();
if (image->width != width || image->height != height)
throw "Images are not the same size";
if (image->type != Type::lab)
throw "Destination image must be of type Lab";
if (type != Type::ppm || bpp != 8)
throw "Only works for ppm 8bpp at the moment";
dim3 blockSize = { 16, 16 };
dim3 gridSize = {
((int)width + blockSize.x - 1) / blockSize.x,
((int)height + blockSize.y - 1) / blockSize.y
};
setup_cielab(stream);
f_ppm8_to_cielab <<< gridSize, blockSize, 0, stream >>> (
(float3*) image->mem.device.data, image->mem.device.pitch,
(uchar3*) this->mem.device.data, this->mem.device.pitch,
width, height);
}
void Image::fromLab(Image* image, cudaStream_t stream)
{
if (!image) throw "Image is null";
if (image->width != width || image->height != height)
throw "Images are not the same size";
if (image->type != Type::lab)
throw "Destination image must be of type Lab";
if (type != Type::ppm || bpp != 8)
throw "Only works for ppm 8bpp at the moment";
dim3 blockSize = { 32, 32 };
dim3 gridSize = {
((int)width + blockSize.x - 1) / blockSize.x,
((int)height + blockSize.y - 1) / blockSize.y
};
setup_cielab(stream);
f_cielab_to_ppm8 <<< gridSize, blockSize, 0, stream >>> (
(uchar3*) this->mem.device.data, this->mem.device.pitch,
(float3*) image->mem.device.data, image->mem.device.pitch,
width, height);
}
void Image::printInfo()
{
printf("IMAGE %s\n", filename);
const char* typeName;
switch (type)
{
case Type::unknown: typeName = "unknown"; break;
case Type::jpeg: typeName = "JPEG"; break;
case Type::ppm: typeName = "PPM"; break;
case Type::pgm: typeName = "PGM"; break;
case Type::raw: typeName = "RAW"; break;
case Type::lab: typeName = "LAB"; break;
default: typeName = "INVALID!"; break;
}
printf("- TYPE: %s\n", typeName);
printf("- SIZE: %lu x %lu\n", width, height);
printf("- PITCH: %lu (dev), %lu (host)\n", mem.device.pitch, mem.host.pitch);
printf("- RANGE: 0x%lX\n", range);
printf("\n");
fflush(stdout);
}
Image* Image::create(Type type, size_t width, size_t height, size_t channels, size_t bpp)
{
int rc;
auto result = new Image();
result->width = width;
result->height = height;
result->type = type;
result->bpp = bpp;
if (!channels)
switch (type)
{
case Type::jpeg:
case Type::ppm:
result->channels=3;
break;
case Type::lab:
result->channels=3;
result->bpp = bpp =32;
break;
case Type::pgm:
result->channels=1;
break;
default: throw "Invalid image type";
}
else result->channels = channels;
result->range = (1ULL << result->bpp) - 1;
result->mem.host.pitch = width * (bpp >> 3) * result->channels;
rc = cudaMallocHost(&result->mem.host.data, result->mem.host.pitch * height);
if (cudaSuccess != rc) throw "Unable to allocate host memory for image";
rc = cudaMallocPitch(&result->mem.device.data, &result->mem.device.pitch,
width * (bpp >> 3) * result->channels, height);
if (cudaSuccess != rc) throw "Unable to allocate device memory for image";
return result;
}
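// Layout note: the host buffer is tightly packed (pitch = width * bytes-per-sample * channels),
// while the device buffer comes from cudaMallocPitch and may be padded per row. Sample (x, y, c)
// on the host therefore lives at
//   (uint8_t*)mem.host.data + y * mem.host.pitch + (x * channels + c) * (bpp >> 3)
// which is exactly the addressing used by psnr() below.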
void Image::loadPPM()
{
int rc;
FILE* f = fopen(filename, "rb");
if (!f) throw "Unable to open file";
rc = fscanf(f, "P6 %lu %lu %lu \n", &width, &height, &range);
if (rc <= 0) throw "Unable to read PPM header";
type = Type::ppm;
channels = 3;
if (range < 256) bpp = 8;
else bpp = 16;
mem.host.pitch = width * (bpp >> 3) * channels;
rc = cudaMallocHost(&mem.host.data, mem.host.pitch * height);
if (cudaSuccess != rc) throw "Unable to allocate host memory for image";
rc = cudaMallocPitch(&mem.device.data, &mem.device.pitch, width * (bpp >> 3) * channels, height);
if (cudaSuccess != rc) throw "Unable to allocate device memory for image";
rc = fread(mem.host.data, 1, mem.host.pitch * height, f);
if (rc <= 0) throw "Unable to read image data from PPM";
fclose(f);
}
void Image::loadPGM()
{
}
void Image::loadJPG()
{
}
Image* Image::load(const char* filename)
{
auto result = new Image();
result->filename = result->_filename = strdup(filename);
	const char* extension = strrstr(filename, ".");
	if (!extension) throw "Unable to determine image type from file extension";
	if (!strcmp(extension, ".jpeg")) result->loadJPG();
	if (!strcmp(extension, ".jpg")) result->loadJPG();
	if (!strcmp(extension, ".ppm")) result->loadPPM();
	if (!strcmp(extension, ".pgm")) result->loadPGM();
return result;
}
float Image::psnr(const Image* ref)
{
float mse = 0;
for (size_t x=2; x<width-2; x++)
{
for (size_t y=2; y<height-2; y++)
{
for (size_t c=0; c<channels; c++)
{
void* p = ((uint8_t*)mem.host.data
+ y * mem.host.pitch
+ (x * channels + c) * (bpp>>3));
void* q = ((uint8_t*)ref->mem.host.data
+ y * ref->mem.host.pitch
+ (x * channels + c) * (bpp>>3));
float pv,qv;
switch (bpp)
{
case 8:
pv = (float)*(uint8_t*)p;
qv = (float)*(uint8_t*)q;
break;
case 16:
pv = (float)*(uint16_t*)p;
qv = (float)*(uint16_t*)q;
break;
default: throw "Unable to calculate PSNR due to unexpected bpp";
}
mse += (pv - qv) * (pv - qv);
}
}
}
mse /= width * height * channels;
return 20 * log10(range) - 10 * log10(mse);
}
JpegCodec::JpegCodec()
{
_width = 0;
_height = 0;
_channels = 0;
_buffer = nullptr;
_scanlines = nullptr;
_dinfo.err = jpeg_std_error(&_djerr);
_cinfo.err = jpeg_std_error(&_cjerr);
}
JpegCodec::~JpegCodec()
{
free(_buffer);
free(_scanlines);
}
void JpegCodec::prepare(int width, int height, int channels, int quality)
{
if (channels != 3) throw "Not implemented channels != 3";
_width = width;
_height = height;
_channels = channels;
_buffer = (uint8_t*) malloc(_width * _height * _channels);
if (!_buffer) throw "Unable to allocate intermediate buffer";
_scanlines = (JSAMPARRAY) malloc( sizeof(JSAMPROW) * height);
if (!_scanlines)
{
free(_buffer);
throw "Unable to allocate scanlines structure";
}
for (size_t i=0; i<_height; i++)
{
_scanlines[i] = (JSAMPROW) (_buffer + i * _width * _channels);
}
jpeg_create_decompress(&_dinfo);
jpeg_create_compress(&_cinfo);
_cinfo.image_width = _width;
_cinfo.image_height = height;
_cinfo.input_components = 3;
_cinfo.in_color_space = JCS_RGB;
jpeg_set_defaults(&_cinfo);
jpeg_set_quality(&_cinfo, quality, 1);
}
void JpegCodec::unprepare()
{
jpeg_destroy_decompress(&_dinfo);
jpeg_destroy_compress(&_cinfo);
}
void JpegCodec::encodeCPU(void* dst, size_t *size)
{
//cudaMemcpyAsync(_buffer, src, _width * _height * _channels, cudaMemcpyDeviceToHost, stream);
//cudaStreamSynchronize(stream);
jpeg_mem_dest(&_cinfo, (uint8_t**)&dst, size);
jpeg_start_compress(&_cinfo, 1);
while (_cinfo.next_scanline < _cinfo.image_height)
{
jpeg_write_scanlines(&_cinfo, _scanlines + _cinfo.next_scanline, _cinfo.image_height - _cinfo.next_scanline);
}
jpeg_finish_compress(&_cinfo);
}
void JpegCodec::decodeToDeviceMemoryCPU(void* dst, const void* src, size_t size, cudaStream_t stream)
{
jpeg_mem_src(&_dinfo, (uint8_t*)src, size);
jpeg_read_header(&_dinfo, 1);
jpeg_calc_output_dimensions(&_dinfo);
if (_dinfo.output_width != _width
|| _dinfo.output_height != _height
|| _dinfo.output_components != (int) _channels)
{
jpeg_abort_decompress(&_dinfo);
throw "Invalid image format";
}
jpeg_start_decompress(&_dinfo);
while (_dinfo.output_scanline < _dinfo.output_height)
{
jpeg_read_scanlines(&_dinfo, _scanlines + _dinfo.output_scanline,_dinfo.output_height - _dinfo.output_scanline);
}
jpeg_finish_decompress(&_dinfo);
cudaMemcpyAsync(dst, _buffer, _width * _height * _channels, cudaMemcpyHostToDevice, stream);
}
#if USE_NVJPEG
void JpegCodec::decodeToDeviceMemoryGPU(void* dst, const void* src, size_t size, cudaStream_t stream)
{
int rc;
nvjpegHandle_t handle;
rc = nvjpegCreateEx(NVJPEG_BACKEND_DEFAULT, NULL, NULL, 0, &handle);
if (cudaSuccess != rc) throw "Unable to create nvjpeg handle";
int channels;
int widths[NVJPEG_MAX_COMPONENT];
int heights[NVJPEG_MAX_COMPONENT];
nvjpegChromaSubsampling_t subsampling;
nvjpegJpegState_t state;
nvjpegOutputFormat_t fmt = NVJPEG_OUTPUT_RGBI;
nvjpegJpegStateCreate(handle, &state);
nvjpegGetImageInfo(handle, (uint8_t*) src, size, &channels, &subsampling, widths, heights);
if (widths[0] != (int)_width
|| heights[0] != (int)_height)
{
nvjpegJpegStateDestroy(state);
nvjpegDestroy(handle);
throw "Invalid image format";
}
nvjpegImage_t output;
output.channel[0] = (uint8_t*) dst;
output.pitch[0] = widths[0] * _channels;
nvjpegDecode(handle, state, (uint8_t*)src, size, fmt, &output, stream);
nvjpegJpegStateDestroy(state);
nvjpegDestroy(handle);
}
#endif
|
e60742d18e6708d77faf4e6e7141e8855ca3bafd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "finishScanKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int N = 1;
int *scanv = NULL;
hipMalloc(&scanv, XSIZE*YSIZE*sizeof(int));
int *starts = NULL;
hipMalloc(&starts, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
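// The two loops above round iXSIZE/iYSIZE up to the next multiple of the block size so the grid
// covers the whole matrix; an equivalent one-line form (illustrative only) would be
//   iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX;
//   iYSIZE = ((YSIZE + BLOCKY - 1) / BLOCKY) * BLOCKY;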
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((finishScanKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, N, scanv, starts);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((finishScanKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, N, scanv, starts);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((finishScanKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, N, scanv, starts);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
e60742d18e6708d77faf4e6e7141e8855ca3bafd.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "finishScanKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int N = 1;
int *scanv = NULL;
cudaMalloc(&scanv, XSIZE*YSIZE*sizeof(int));
int *starts = NULL;
cudaMalloc(&starts, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
finishScanKernel<<<gridBlock,threadBlock>>>(N,scanv,starts);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
finishScanKernel<<<gridBlock,threadBlock>>>(N,scanv,starts);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
finishScanKernel<<<gridBlock,threadBlock>>>(N,scanv,starts);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ba3e366076b016ce1768c29318ff0dbdce9e676a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:BOOGIE_ERROR
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is racy.
//
//It uses memcpy and copies too many bytes.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
short y;
char z;
} s_t; //< sizeof(s_t) == 6
__global__ void k(s_t *in, s_t *out) {
memcpy(&out[threadIdx.x], &in[threadIdx.x], 12); //< copy two elements
}
|
ba3e366076b016ce1768c29318ff0dbdce9e676a.cu
|
//xfail:BOOGIE_ERROR
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is racy.
//
//It uses memcpy and copies too many bytes.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
short y;
char z;
} s_t; //< sizeof(s_t) == 6
__global__ void k(s_t *in, s_t *out) {
memcpy(&out[threadIdx.x], &in[threadIdx.x], 12); //< copy two elements
}
|
86cc97e81d7b754453aec5068a4de005a7dac521.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_exp10.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(double));
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((vec_exp10), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((vec_exp10), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((vec_exp10), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
86cc97e81d7b754453aec5068a4de005a7dac521.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_exp10.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(double));
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_exp10<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_exp10<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_exp10<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
71a25c2a424026fef24dd673c39c3fb7e4b1a98a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
 * This file has the necessary functions to perform X-ray CBCT projection
 * operation given a geometry, angles and image. It uses the 3D texture
 * memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "ray_interpolated_projection.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
hipDeviceReset();\
mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",hipGetErrorString(__err));\
} \
} while (0)
// Declare the texture reference.
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 9
#define PIXEL_SIZE_BLOCK 9
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
__constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device
__global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (idx < n)
a[idx] = a[idx] + b[idx];
}
template<bool sphericalrotation>
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
const int currProjSetNumber,
const int totalNoOfProjections,
hipTextureObject_t tex){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long projNumber=threadIdx.z;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV)| (projNumber>=PROJ_PER_BLOCK))
return;
size_t idx = (size_t)(x * geo.nDetecV + y)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ;
int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array
if(indAlpha>=totalNoOfProjections)
return;
    Point3D uvOrigin = projParamsArrayDev[4*projNumber];      // 4*projNumber because we have 4 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
float DSO = projFloatsArrayDev[2*projNumber+0];
float cropdist_init = projFloatsArrayDev[2*projNumber+1];
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z));
    //now length is the integer number of samples required on this line
length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer
vectX=__fdividef(P.x -source.x,length);
vectY=__fdividef(P.y -source.y,length);
vectZ=__fdividef(P.z -source.z,length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
    // Because I have no idea how to efficiently cut off the length of the path in 3D, a very loose upper limit is computed (see maxdistanceCuboid)
// for the 3D case. However it would be bad to lose performance in the 3D case
    // TODO: can we really improve this?
if (sphericalrotation){
if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy);
}
else{
if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy);
}
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+source.x;
ty=vectY*i+source.y;
tz=vectZ*i+source.z;
sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time.
}
float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+
(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
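// In short, the kernel above ray-marches from the source towards each detector pixel in
// normalized voxel space, taking one texture sample roughly every geo.accuracy voxels, so
//   projection(u,v) ~= (sum of tex3D samples along the ray) * deltalength
// where deltalength is the physical (world-unit) length of a single step along that ray.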
// length(angles) = 3 x nangles, as we have roll, pitch, yaw.
int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
    // 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
const int devicenamelength = 256; // The length 256 is fixed by spec of hipDeviceProp_t::name
char devicename[devicenamelength];
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
// Check free memory
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float);
// Does everything fit in the GPUs?
const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global;
unsigned int splits=1;
if (!fits_in_memory) {
// Nope nope.
// approx free memory we have. We already have left some extra 5% free for internal stuff
// we need a second projection memory to combine multi-GPU stuff.
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj;
splits=mem_image/mem_free+1;// Ceil of the truncation
}
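    // Worked example (numbers are hypothetical): with roughly 9 GB of GPU memory left after
    // reserving the projection buffers, a 2048^3 float volume (2048^3 * 4 B ~ 32 GB) gives
    // splits = 32/9 + 1 = 4, so each pass only stages about a quarter (~8 GB) of the volume.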
Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry));
splitImageInterp(splits,geo,geoArray,nangles);
    // Allocate auxiliary memory for projections on the GPU to accumulate partial results
float ** dProjection_accum;
size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float);
if (!fits_in_memory){
dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
for (int i = 0; i < 2; ++i){
hipMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj);
hipMemset(dProjection_accum[dev*2+i],0,num_bytes_proj);
cudaCheckErrors("cudaMallocauxiliarty projections fail");
}
}
}
    // This happens regardless of whether the image fits in memory
float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < 2; ++i){
hipMalloc((void**)&dProjection[dev*2+i], num_bytes_proj);
hipMemset(dProjection[dev*2+i] ,0,num_bytes_proj);
cudaCheckErrors("hipMalloc projections fail");
}
}
//Pagelock memory for synchronous copy.
    // Let's try to make the host memory pinned:
    // We already queried the GPUs and assumed they are the same, so they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
    // empirical testing shows that when the image is not split (which also implies the image is not very big), the time to
    // pin the memory is greater than the time lost by launching the memcpys synchronously. This is only worth it when the image is too big.
    if (isHostRegisterSupported && splits>1){
hipHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost = 0;
hipHostMalloc((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
float* projFloatsArrayHost = 0;
hipHostMalloc((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float));
cudaCheckErrors("Error allocating auxiliary constant memory");
// Create Streams for overlapping memcopy and compute
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
int nangles_device=(nangles+deviceCount-1)/deviceCount;
int nangles_last_device=(nangles-(deviceCount-1)*nangles_device);
unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management.
int projection_this_block;
hipTextureObject_t *texImg = new hipTextureObject_t[deviceCount];
hipArray **d_cuArrTex = new hipArray*[deviceCount];
for (unsigned int sp=0;sp<splits;sp++){
// Create texture objects for all GPUs
size_t linear_idx_start;
// They are all the same size, except the last one.
linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ;
CreateTextureInterp(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp);
cudaCheckErrors("Texture object creation fail");
int divU,divV;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
float maxdist;
// Now that we have prepared the image (piece of image) and parameters for kernels
// we project for all angles.
for (unsigned int i=0; i<noOfKernelCalls; i++) {
for (dev=0;dev<deviceCount;dev++){
float is_spherical=0;
hipSetDevice(dev);
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device;
if (proj_global>=nangles)
break;
if ((i*PROJ_PER_BLOCK+j)>=nangles_device)
break;
geoArray[sp].alpha=angles[proj_global*3];
geoArray[sp].theta=angles[proj_global*3+1];
geoArray[sp].psi =angles[proj_global*3+2];
is_spherical+=abs(geoArray[sp].theta)+abs(geoArray[sp].psi);
//precomute distances for faster execution
maxdist=maxdistanceCuboid(geoArray[sp],proj_global);
//Precompute per angle constant stuff for speed
computeDeltas(geoArray[sp], proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
                projParamsArrayHost[4*j]=uvOrigin;		// 4*j because we have 4 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
projFloatsArrayHost[2*j]=geo.DSO[proj_global];
projFloatsArrayHost[2*j+1]=floor(maxdist);
}
hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]);
hipMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]);
hipStreamSynchronize(stream[dev*nStream_device]);
            //TODO: we could do this around the X and Y axes too, but we would need to compute the new axis of rotation (not possible to know from just the angles)
if (!is_spherical){
hipLaunchKernelGGL(( kernelPixelDetector<false>), dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
else{
hipLaunchKernelGGL(( kernelPixelDetector<true>) , dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
}
// Now that the computation is happening, we need to either prepare the memory for
// combining of the projections (splits>1) and start removing previous results.
// If our image does not fit in memory then we need to make sure we accumulate previous results too.
// This is done in 2 steps:
// 1)copy previous results back into GPU
// 2)accumulate with current results
// The code to take them out is the same as when there are no splits needed
if( !fits_in_memory&&sp>0)
{
// 1) grab previous results and put them in the auxiliary variable dProjection_accum
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
hipMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyHostToDevice,stream[dev*2+1]);
}
// 2) take the results from current compute call and add it to the code in execution.
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
hipStreamSynchronize(stream[dev*2+1]); // wait until copy is finished
hipLaunchKernelGGL(( vecAddInPlaceInterp), dim3((geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS),dim3(MAXTREADS),0,stream[dev*2], dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block);
}
} // end accumulation case, where the image needs to be split
// Now, lets get out the projections from the previous execution of the kernels.
if (i>0)
{
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on previous set on this GPU
proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device;
if (dev+1==deviceCount) { //is it the last device?
// projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device
if (i-1 < noOfKernelCallsLastDev) {
// The previous set(block) was not empty.
projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global);
}
else {
// The previous set was empty.
// This happens if deviceCount > PROJ_PER_BLOCK+1.
// e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199.
// e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7.
break;
}
}
else {
projection_this_block=PROJ_PER_BLOCK;
}
hipMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]);
}
}
// Make sure Computation on kernels has finished before we launch the next batch.
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*2]);
}
} // End noOfKernelCalls (i) loop.
// We still have the last set of projections to get out of GPUs
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// How many projections are left here?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
        hipDeviceSynchronize(); //Not really necessary, but just in case; we lose nothing.
cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)");
hipMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]);
}
    // Make sure everyone has done their business before the next image split:
for (dev = 0; dev < deviceCount; dev++)
{
hipSetDevice(dev);
hipDeviceSynchronize();
}
} // End image split loop.
cudaCheckErrors("Main loop fail");
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDestroyTextureObject(texImg[dev]);
hipFreeArray(d_cuArrTex[dev]);
}
delete[] texImg; texImg = 0;
delete[] d_cuArrTex; d_cuArrTex = 0;
// Freeing Stage
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipFree(dProjection[dev*2]);
hipFree(dProjection[dev*2+1]);
}
free(dProjection);
if(!fits_in_memory){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipFree(dProjection_accum[dev*2]);
hipFree(dProjection_accum[dev*2+1]);
}
free(dProjection_accum);
}
freeGeoArray(splits,geoArray);
hipHostFree(projParamsArrayHost);
hipHostFree(projFloatsArrayHost);
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
    if (isHostRegisterSupported && splits>1){
hipHostUnregister(img);
}
cudaCheckErrors("hipFree fail");
// hipDeviceReset();
return 0;
}
void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate)
{
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
if(allocate){
for (unsigned int i = 0; i < num_devices; i++){
hipSetDevice(i);
//hipArray Descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
//cuda Array
hipMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent);
cudaCheckErrors("Texture memory allocation fail");
}
}
for (unsigned int i = 0; i < num_devices; i++){
hipMemcpy3DParms copyParams = {0};
hipSetDevice(i);
//Array creation
copyParams.srcPtr = make_hipPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[i];
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3DAsync(©Params);
//cudaCheckErrors("Texture memory data copy fail");
//Array creation End
}
for (unsigned int i = 0; i < num_devices; i++){
hipSetDevice(i);
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArrTex[i];
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
if (geo.accuracy>1){
texDescr.filterMode = hipFilterModePoint;
geo.accuracy=1;
}
else{
texDescr.filterMode = hipFilterModeLinear;
}
texDescr.addressMode[0] = hipAddressModeBorder;
texDescr.addressMode[1] = hipAddressModeBorder;
texDescr.addressMode[2] = hipAddressModeBorder;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL);
cudaCheckErrors("Texture object creation fail");
}
}
/* This code generates the geometries needed to split the image properly in
* cases where the entire image does not fit in the memory of the GPU
**/
void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){
unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible
for(unsigned int sp=0;sp<splits;sp++){
geoArray[sp]=geo;
// All of them are splitsize, but the last one, possible
geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp;
geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ;
// We need to redefine the offsets, as now each subimage is not aligned in the origin.
geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float));
for (unsigned int i=0;i<nangles;i++){
geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2;
}
}
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
    // Geometric transformations:
    // Now we have the real-world (OXYZ) coordinates of the bottom corner and its two neighbours.
    // The objective is to get a position of the detector in a coordinate system where:
    // 1-units are voxel size (which can be different in each direction)
    // 2-The image has its first voxel at (0,0,0)
    // 3-The image never rotates
    // To do that, we need to compute the "deltas" of the detector, or "by how much
    // (in the new xyz) do the voxels change when an index is added". To do that,
    // several geometric steps need to be applied
    //1.Roll, pitch, yaw
// The detector can have a small rotation.
// according to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// Only the Z rotation will have a big influence in the image quality when they are small.
// Still all rotations are supported
    // To roll, pitch and yaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
    //Now let's translate the detector coordinates to DOD (original position in the real coordinate system):
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
//2: Offset detector
    //S doesn't need to change
//3: Rotate around RZ RY RZ
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//3: Offset image (instead of offseting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
//5. apply COR. Wherever everything was, now it is offset by a bit.
// Only works for the standard rotation, not arbitrary axis rotation.
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
float maxdistanceCuboid(Geometry geo,unsigned int i){
///////////
// Compute initial "t" so we access safely as less as out of bounds as possible.
//////////
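// In voxel units the source positions lie on an ellipse with semi-axes a=DSO/dVoxelX and b=DSO/dVoxelY.
// The expression a*b/sqrt(a^2*sin^2(alpha)+b^2*cos^2(alpha)) below is the polar radius of that ellipse
// at angle alpha; subtracting the bounding radius of the (offset) volume gives a conservative distance
// that can be skipped before sampling must start.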
float maxCubX,maxCubY,maxCubZ;
// Forgetting Z, compute the max distance: diagonal+offset
maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX);
maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY);
maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ);
float a,b;
a=geo.DSO[i]/geo.dVoxelX;
b=geo.DSO[i]/geo.dVoxelY;
// As the return of this value is in "voxel space", the source may have an elliptical curve.
// The distance returned is the safe distance that can be skipped for a given angle alpha, before we need to start sampling.
if (geo.theta==0.0f & geo.psi==0.0f) // Special case, it will make the code faster
return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))-
sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f);
//TODO: think of more special cases?
return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f);
}
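// Applies the rotation R = Rz(dRoll)*Ry(dPitch)*Rx(dYaw) for projection i to the point, in place.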
void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->z=-sin(geo.dPitch[i])*auxPoint.x
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
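// Applies the ZYZ Euler rotation R = Rz(alpha)*Ry(theta)*Rz(psi) to the point, in place.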
void eulerZYZ(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+
cos(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+
sin(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+
sin(geo.theta)*sin(geo.psi)*auxPoint.y+
cos(geo.theta)*auxPoint.z;
}
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
for(unsigned int sp=0;sp<splits;sp++){
free(geoArray[sp].offOrigZ);
}
free(geoArray);
}
//______________________________________________________________________________
//
// Function: checkFreeMemory
//
// Description: check available memory on devices
//______________________________________________________________________________
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
71a25c2a424026fef24dd673c39c3fb7e4b1a98a.cu
|
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
* This file has the necessary functions to perform the X-ray CBCT projection
* operation given a geometry, angles and an image. It uses the 3D texture
* memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "ray_interpolated_projection.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
cudaDeviceReset();\
mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",cudaGetErrorString(__err));\
} \
} while (0)
// Declare the texture reference.
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 9
#define PIXEL_SIZE_BLOCK 9
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
__constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device
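// Layout of the per-batch constant memory: for each of the PROJ_PER_BLOCK projections the host
// uploads 4 Point3D values (uvOrigin, deltaU, deltaV, source) and 2 floats (the DSO and the initial
// crop distance) before each kernel launch.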
__global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (idx < n)
a[idx] = a[idx] + b[idx];
}
template<bool sphericalrotation>
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
const int currProjSetNumber,
const int totalNoOfProjections,
cudaTextureObject_t tex){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long projNumber=threadIdx.z;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV)| (projNumber>=PROJ_PER_BLOCK))
return;
size_t idx = (size_t)(x * geo.nDetecV + y)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ;
int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array
if(indAlpha>=totalNoOfProjections)
return;
Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 4*projNumber because we have 4 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
float DSO = projFloatsArrayDev[2*projNumber+0];
float cropdist_init = projFloatsArrayDev[2*projNumber+1];
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z));
//now length is the integer number of samples that are required on this line
length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer
vectX=__fdividef(P.x -source.x,length);
vectY=__fdividef(P.y -source.y,length);
vectZ=__fdividef(P.z -source.z,length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// Because I have no idea how to efficiently cut off the path length in 3D, a very loose upper limit is computed (see maxdistanceCuboid)
// for the 3D case. However it would be bad to lose performance in the 3D case.
// TODO: can we really improve this?
if (sphericalrotation){
if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy);
}
else{
if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length)
length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy);
}
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+source.x;
ty=vectY*i+source.y;
tz=vectZ*i+source.z;
sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time.
}
float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+
(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
// length(angles)=3 x nangles, as we have roll, pitch, yaw.
int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
const int devicenamelength = 256; // The length 256 is fixed by spec of cudaDeviceProp::name
char devicename[devicenamelength];
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
// Check free memory
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float);
// Does everything fit in the GPUs?
const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global;
unsigned int splits=1;
if (!fits_in_memory) {
// Nope nope.
// approx free memory we have. We already have left some extra 5% free for internal stuff
// we need a second projection memory to combine multi-GPU stuff.
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj;
splits=mem_image/mem_free+1;// Ceil of the truncation
}
Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry));
splitImageInterp(splits,geo,geoArray,nangles);
// Allocate auxiliary memory for projections on the GPU to accumulate partial results
float ** dProjection_accum;
size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float);
if (!fits_in_memory){
dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
for (int i = 0; i < 2; ++i){
cudaMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj);
cudaMemset(dProjection_accum[dev*2+i],0,num_bytes_proj);
cudaCheckErrors("cudaMallocauxiliarty projections fail");
}
}
}
// This happens regardless of whether the image fits in memory
float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < 2; ++i){
cudaMalloc((void**)&dProjection[dev*2+i], num_bytes_proj);
cudaMemset(dProjection[dev*2+i] ,0,num_bytes_proj);
cudaCheckErrors("cudaMalloc projections fail");
}
}
//Pagelock memory for synchronous copy.
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
// Empirical testing shows that when the image is not split (which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. This is only worth it when the image is too big.
if (isHostRegisterSupported & splits>1){
cudaHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost = 0;
cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
float* projFloatsArrayHost = 0;
cudaMallocHost((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float));
cudaCheckErrors("Error allocating auxiliary constant memory");
// Create Streams for overlapping memcopy and compute
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
int nangles_device=(nangles+deviceCount-1)/deviceCount;
int nangles_last_device=(nangles-(deviceCount-1)*nangles_device);
unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management.
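// Illustrative example (not from the original source): with nangles=100, deviceCount=2 and
// PROJ_PER_BLOCK=9, nangles_device=50, so noOfKernelCalls=ceil(50/9)=6 and the last call on each
// device only has 50-5*9=5 projections to do; the bounds checks inside the loop handle that remainder.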
int projection_this_block;
cudaTextureObject_t *texImg = new cudaTextureObject_t[deviceCount];
cudaArray **d_cuArrTex = new cudaArray*[deviceCount];
for (unsigned int sp=0;sp<splits;sp++){
// Create texture objects for all GPUs
size_t linear_idx_start;
// They are all the same size, except the last one.
linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ;
CreateTextureInterp(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp);
cudaCheckErrors("Texture object creation fail");
int divU,divV;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
float maxdist;
// Now that we have prepared the image (piece of image) and parameters for kernels
// we project for all angles.
for (unsigned int i=0; i<noOfKernelCalls; i++) {
for (dev=0;dev<deviceCount;dev++){
float is_spherical=0;
cudaSetDevice(dev);
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device;
if (proj_global>=nangles)
break;
if ((i*PROJ_PER_BLOCK+j)>=nangles_device)
break;
geoArray[sp].alpha=angles[proj_global*3];
geoArray[sp].theta=angles[proj_global*3+1];
geoArray[sp].psi =angles[proj_global*3+2];
is_spherical+=abs(geoArray[sp].theta)+abs(geoArray[sp].psi);
//precompute distances for faster execution
maxdist=maxdistanceCuboid(geoArray[sp],proj_global);
//Precompute per angle constant stuff for speed
computeDeltas(geoArray[sp], proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
projParamsArrayHost[4*j]=uvOrigin; // 4*j because we have 4 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
projFloatsArrayHost[2*j]=geo.DSO[proj_global];
projFloatsArrayHost[2*j+1]=floor(maxdist);
}
cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]);
cudaMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]);
cudaStreamSynchronize(stream[dev*nStream_device]);
//TODO: we could do this around X and Y axis too, but we would need to compute the new axis of rotation (not possible to know from just the angles)
if (!is_spherical){
kernelPixelDetector<false><<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
else{
kernelPixelDetector<true> <<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]);
}
}
// Now that the computation is happening, we need to either prepare the memory for
// combining of the projections (splits>1) and start removing previous results.
// If our image does not fit in memory then we need to make sure we accumulate previous results too.
// This is done in 2 steps:
// 1)copy previous results back into GPU
// 2)accumulate with current results
// The code to take them out is the same as when there are no splits needed
if( !fits_in_memory&&sp>0)
{
// 1) grab previous results and put them in the auxiliary variable dProjection_accum
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless it's the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
cudaMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyHostToDevice,stream[dev*2+1]);
}
// 2) take the results from current compute call and add it to the code in execution.
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=i*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// Unless it's the last projection set, we have PROJ_PER_BLOCK angles. Otherwise...
if(i+1==noOfKernelCalls) //is it the last block?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
else
projection_this_block=PROJ_PER_BLOCK;
cudaStreamSynchronize(stream[dev*2+1]); // wait until copy is finished
vecAddInPlaceInterp<<<(geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS,MAXTREADS,0,stream[dev*2]>>>(dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block);
}
} // end accumulation case, where the image needs to be split
// Now, lets get out the projections from the previous execution of the kernels.
if (i>0)
{
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on previous set on this GPU
proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device;
if (dev+1==deviceCount) { //is it the last device?
// projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device
if (i-1 < noOfKernelCallsLastDev) {
// The previous set(block) was not empty.
projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global);
}
else {
// The previous set was empty.
// This happens if deviceCount > PROJ_PER_BLOCK+1.
// e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199.
// e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7.
break;
}
}
else {
projection_this_block=PROJ_PER_BLOCK;
}
cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]);
}
}
// Make sure Computation on kernels has finished before we launch the next batch.
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*2]);
}
} // End noOfKernelCalls (i) loop.
// We still have the last set of projections to get out of GPUs
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
//Global index of FIRST projection on this set on this GPU
proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device;
if(proj_global>=nangles)
break;
// How many projections are left here?
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK)
nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU)
cudaDeviceSynchronize(); //Not really necessary, but just in case; we lose nothing.
cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)");
cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]);
}
// Make sure everyone has done their business before the next image split:
for (dev = 0; dev < deviceCount; dev++)
{
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
} // End image split loop.
cudaCheckErrors("Main loop fail");
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDestroyTextureObject(texImg[dev]);
cudaFreeArray(d_cuArrTex[dev]);
}
delete[] texImg; texImg = 0;
delete[] d_cuArrTex; d_cuArrTex = 0;
// Freeing Stage
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaFree(dProjection[dev*2]);
cudaFree(dProjection[dev*2+1]);
}
free(dProjection);
if(!fits_in_memory){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaFree(dProjection_accum[dev*2]);
cudaFree(dProjection_accum[dev*2+1]);
}
free(dProjection_accum);
}
freeGeoArray(splits,geoArray);
cudaFreeHost(projParamsArrayHost);
cudaFreeHost(projFloatsArrayHost);
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
if (isHostRegisterSupported & splits>1){
cudaHostUnregister(img);
}
cudaCheckErrors("cudaFree fail");
// cudaDeviceReset();
return 0;
}
void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate)
{
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
if(allocate){
for (unsigned int i = 0; i < num_devices; i++){
cudaSetDevice(i);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent);
cudaCheckErrors("Texture memory allocation fail");
}
}
for (unsigned int i = 0; i < num_devices; i++){
cudaMemcpy3DParms copyParams = {0};
cudaSetDevice(i);
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[i];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(©Params);
//cudaCheckErrors("Texture memory data copy fail");
//Array creation End
}
for (unsigned int i = 0; i < num_devices; i++){
cudaSetDevice(i);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[i];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
if (geo.accuracy>1){
texDescr.filterMode = cudaFilterModePoint;
geo.accuracy=1;
}
else{
texDescr.filterMode = cudaFilterModeLinear;
}
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL);
cudaCheckErrors("Texture object creation fail");
}
}
/* This code generates the geometries needed to split the image properly in
* cases where the entire image does not fit in the memory of the GPU
**/
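// Illustrative example (not from the original source): with geo.nVoxelZ=100 and splits=3,
// splitsize=ceil(100/3)=34, so the sub-volumes get 34, 34 and 100-2*34=32 slices respectively,
// and each sub-volume's Z offset is shifted so it lines up with its position in the full volume.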
void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){
unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible
for(unsigned int sp=0;sp<splits;sp++){
geoArray[sp]=geo;
// All of them are splitsize, except possibly the last one
geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp;
geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ;
// We need to redefine the offsets, as now each subimage is not aligned in the origin.
geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float));
for (unsigned int i=0;i<nangles;i++){
geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2;
}
}
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
* to compute the locations of the x-rays. While it seems verbose and overly-optimized,
* it saves about 30% of each of the kernel calls. That's something!
**/
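// In the projection kernel the world position (in voxel units) of detector pixel (u,v) is then
// reconstructed as P = uvorigin + u*deltaU + v*deltaV, and a ray is traced from 'source' to P.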
void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
// Now we have the real world (OXYZ) coordinates of the bottom corner and its two neighbours.
// The objective is to get a position of the detector in a coordinate system where:
// 1-units are voxel size (can be different in each direction)
// 2-The image has its first voxel at (0,0,0)
// 3-The image never rotates
// To do that, we need to compute the "deltas" of the detector, or "by how much
// (in new xyz) does the position change when an index is added". To do that,
// several geometric steps need to be applied.
//1.Roll,pitch,yaw
// The detector can have a small rotation.
// According to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// only the Z rotation will have a big influence on the image quality when the rotations are small.
// Still, all rotations are supported.
// To roll pitch yaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
//Now let's translate the detector coordinates to DOD (original position in the real coordinate system):
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
//2: Offset detector
//S doesn't need to change
//3: Rotate around RZ RY RZ
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//3: Offset image (instead of offseting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
//5. apply COR. Wherever everything was, now it is offset by a bit.
// Only works for the standard rotation, not arbitrary axis rotation.
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
float maxdistanceCuboid(Geometry geo,unsigned int i){
///////////
// Compute initial "t" so we access safely as less as out of bounds as possible.
//////////
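// In voxel units the source positions lie on an ellipse with semi-axes a=DSO/dVoxelX and b=DSO/dVoxelY.
// The expression a*b/sqrt(a^2*sin^2(alpha)+b^2*cos^2(alpha)) below is the polar radius of that ellipse
// at angle alpha; subtracting the bounding radius of the (offset) volume gives a conservative distance
// that can be skipped before sampling must start.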
float maxCubX,maxCubY,maxCubZ;
// Forgetting Z, compute the max distance: diagonal+offset
maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX);
maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY);
maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ);
float a,b;
a=geo.DSO[i]/geo.dVoxelX;
b=geo.DSO[i]/geo.dVoxelY;
// As the return of this value is in "voxel space", the source may have an elliptical curve.
// The distance returned is the safe distance that can be skipped for a given angle alpha, before we need to start sampling.
if (geo.theta==0.0f & geo.psi==0.0f) // Special case, it will make the code faster
return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))-
sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f);
//TODO: think of more special cases?
return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f);
}
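// Applies the rotation R = Rz(dRoll)*Ry(dPitch)*Rx(dYaw) for projection i to the point, in place.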
void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->z=-sin(geo.dPitch[i])*auxPoint.x
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
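// Applies the ZYZ Euler rotation R = Rz(alpha)*Ry(theta)*Rz(psi) to the point, in place.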
void eulerZYZ(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+
cos(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+
(-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+
sin(geo.alpha)*sin(geo.theta)*auxPoint.z;
point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+
sin(geo.theta)*sin(geo.psi)*auxPoint.y+
cos(geo.theta)*auxPoint.z;
}
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
for(unsigned int sp=0;sp<splits;sp++){
free(geoArray[sp].offOrigZ);
}
free(geoArray);
}
//______________________________________________________________________________
//
// Function: checkFreeMemory
//
// Description: check available memory on devices
//______________________________________________________________________________
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
2bce92ad01b582ec338f847cf390e9a2652166a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
/*
* The maximum and minimum integer values of the range of printable characters
* in the ASCII alphabet. Used by the encrypt kernel to wrap values so that the
* ciphertext is always printable.
*/
#define MAX_PRINTABLE 64
#define MIN_PRINTABLE 128
#define NUM_ALPHA MAX_PRINTABLE - MIN_PRINTABLE
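/*
 * The encrypt kernel below implements an additive (Vigenere-style) shift: both the text and the key
 * characters are rebased to 0, added modulo NUM_ALPHA, and shifted back by MIN_PRINTABLE, so every
 * output character stays inside the printable range defined above.
 */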
__global__ void encrypt(unsigned int *text, unsigned int *key, unsigned int *result) {
/* Calculate the current index */
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
/*
* Adjust value of text and key to be based at 0
* Printable ASCII starts at MIN_PRINTABLE, but 0 start is easier to work with
*/
char adjusted_text = text[idx] - MIN_PRINTABLE;
char adjusted_key = key[idx] - MIN_PRINTABLE;
/* The cipher character is the text char added to the key char modulo the number of chars in the alphabet*/
char cipherchar = (adjusted_text + adjusted_key) % (NUM_ALPHA);
/* adjust back to normal ascii (starting at MIN_PRINTABLE) and save to result */
result[idx] = (unsigned int) cipherchar + MIN_PRINTABLE ;
}
void pageable_transfer_execution(int array_size, int threads_per_block, FILE *input_fp, FILE *key_fp) {
/* Calculate the size of the array */
int array_size_in_bytes = (sizeof(unsigned int) * (array_size));
int i = 0;
unsigned int *cpu_text = (unsigned int *) malloc(array_size_in_bytes);
unsigned int *cpu_key = (unsigned int *) malloc(array_size_in_bytes);
unsigned int *cpu_result = (unsigned int *) malloc(array_size_in_bytes);
/* Read characters from the input and key files into the text and key arrays respectively */
// Code left out for brevity sake
/* Declare pointers for the GPU based parameters */
unsigned int *gpu_text;
unsigned int *gpu_key;
unsigned int *gpu_result;
hipMalloc((void **)&gpu_text, array_size_in_bytes);
hipMalloc((void **)&gpu_key, array_size_in_bytes);
hipMalloc((void **)&gpu_result, array_size_in_bytes);
/* Copy the CPU memory to the GPU memory */
hipMemcpy( gpu_text, cpu_text, array_size_in_bytes, hipMemcpyHostToDevice);
hipMemcpy( gpu_key, cpu_key, array_size_in_bytes, hipMemcpyHostToDevice);
/* Designate the number of blocks and threads */
const unsigned int num_blocks = array_size/threads_per_block;
const unsigned int num_threads = array_size/num_blocks;
/* Execute the encryption kernel and keep track of start and end time for duration */
float duration = 0;
hipEvent_t start_time = get_time();
hipLaunchKernelGGL(( encrypt), dim3(num_blocks), dim3(num_threads), 0, 0, gpu_text, gpu_key, gpu_result);
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
hipEventElapsedTime(&duration, start_time, end_time);
/* Copy the changed GPU memory back to the CPU */
hipMemcpy( cpu_result, gpu_result, array_size_in_bytes, hipMemcpyDeviceToHost);
printf("Pageable Transfer- Duration: %fmsn\n", duration);
print_encryption_results(cpu_text, cpu_key, cpu_result, array_size);
/* Free the GPU memory */
// INSERT CODE HERE
/* Free the CPU memory */
// INSERT CODE HERE
}
void pinned_transfer_execution(int array_size, int threads_per_block, FILE *input_fp, FILE *key_fp) {
// Code left out for brevity sake
//pin it
hipHostMalloc((void **)&cpu_text_pinned, array_size_in_bytes);
hipHostMalloc((void **)&cpu_key_pinned, array_size_in_bytes);
hipHostMalloc((void **)&cpu_result_pinned, array_size_in_bytes);
/* Copy the memory over */
// INSERT CODE HERE
/* Declare and allocate pointers for GPU based parameters */
unsigned int *gpu_text;
unsigned int *gpu_key;
unsigned int *gpu_result;
hipMalloc((void **)&gpu_text, array_size_in_bytes);
hipMalloc((void **)&gpu_key, array_size_in_bytes);
hipMalloc((void **)&gpu_result, array_size_in_bytes);
/* Copy the CPU memory to the GPU memory */
hipMemcpy( gpu_text, cpu_text_pinned, array_size_in_bytes, hipMemcpyHostToDevice);
hipMemcpy( gpu_key, cpu_key_pinned, array_size_in_bytes, hipMemcpyHostToDevice);
/* Designate the number of blocks and threads */
const unsigned int num_blocks = array_size/threads_per_block;
const unsigned int num_threads = array_size/num_blocks;
/* Execute the encryption kernel and keep track of start and end time for duration */
float duration = 0;
hipEvent_t start_time = get_time();
hipLaunchKernelGGL(( encrypt), dim3(num_blocks), dim3(num_threads), 0, 0, gpu_text, gpu_key, gpu_result);
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
hipEventElapsedTime(&duration, start_time, end_time);
/* Copy the changed GPU memory back to the CPU */
hipMemcpy( cpu_result_pinned, gpu_result, array_size_in_bytes, hipMemcpyDeviceToHost);
printf("Pinned Transfer- Duration: %fmsn\n", duration);
print_encryption_results(cpu_text_pinned, cpu_key_pinned, cpu_result_pinned, array_size);
/* Free the GPU memory */
hipFree(gpu_text);
hipFree(gpu_key);
hipFree(gpu_result);
/* Free the pinned CPU memory */
hipHostFree(cpu_text_pinned);
hipHostFree(cpu_key_pinned);
hipHostFree(cpu_result_pinned);
/* Free the pageable CPU memory */
// INSERT CODE HERE
}
/**
 * Prints the correct usage of this file
 * @name is the name of the executable (argv[0])
 */
void print_usage(char *name) {
printf("Usage: %s <total_num_threads> <threads_per_block> <input_file> <key_file>\n", name);
}
/**
* Performs simple setup functions before calling the pageable_transfer_execution()
* function.
* Makes sure the files are valid, handles opening and closing of file pointers.
*/
void pageable_transfer(int num_threads, int threads_per_block, char *input_file, char *key_file) {
// Code left out for brevity sake
/* Perform the pageable transfer */
pageable_transfer_execution(num_threads, threads_per_block, input_fp, key_fp);
fclose(input_fp); fclose(key_fp);
}
/**
* Performs setup functions before calling the pinned_transfer_execution()
* function.
* Makes sure the files are valid, handles opening and closing of file pointers.
*/
void pinned_transfer(int num_threads, int threads_per_block, char *input_file, char *key_file) {
// Code left out for brevity sake
/* Perform the pinned transfer */
pinned_transfer_execution(num_threads, threads_per_block, input_fp, key_fp);
fclose(input_fp); fclose(key_fp);
}
/**
* Entry point for execution. Checks command line arguments and
* opens input files, then passes execution to subordinate main_sub()
*/
int main(int argc, char *argv[]) {
/* Check the number of arguments, print usage if wrong
*/
if(argc != 5) {
printf("Error: Incorrect number of command line arguments\n");
print_usage(argv[0]); exit(-1);
}
/* Check the values for num_threads and threads_per_block */
int num_threads = atoi(argv[1]);
int threads_per_block = atoi(argv[2]);
if(num_threads <= 0 || threads_per_block <= 0) {
printf("Error: num_threads and threads_per_block must be integer > 0");
print_usage(argv[0]); exit(-1);
}
if(threads_per_block > num_threads) {
printf("Error: threads per block is greater than number of threads\n");
print_usage(argv[0]);
exit(-1);
}
printf("\n");
/* Perform the pageable transfer */
pageable_transfer(num_threads, threads_per_block, argv[3], argv[4]);
printf("-----------------------------------------------------------------\n");
/* Perform the pinned transfer */
pinned_transfer(num_threads, threads_per_block, argv[3], argv[4]);
return EXIT_SUCCESS;
}
|
2bce92ad01b582ec338f847cf390e9a2652166a1.cu
|
#include <stdio.h>
#include <stdlib.h>
/*
* The maximum and minimum integer values of the range of printable characters
* in the ASCII alphabet. Used by the encrypt kernel to wrap values so that the
* ciphertext is always printable.
*/
#define MAX_PRINTABLE 64
#define MIN_PRINTABLE 128
#define NUM_ALPHA MAX_PRINTABLE - MIN_PRINTABLE
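/*
 * The encrypt kernel below implements an additive (Vigenere-style) shift: both the text and the key
 * characters are rebased to 0, added modulo NUM_ALPHA, and shifted back by MIN_PRINTABLE, so every
 * output character stays inside the printable range defined above.
 */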
…
__global__ void encrypt(unsigned int *text, unsigned int *key, unsigned int *result) {
/* Calculate the current index */
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
/*
* Adjust value of text and key to be based at 0
* Printable ASCII starts at MIN_PRINTABLE, but 0 start is easier to work with
*/
char adjusted_text = text[idx] - MIN_PRINTABLE;
char adjusted_key = key[idx] - MIN_PRINTABLE;
/* The cipher character is the text char added to the key char modulo the number of chars in the alphabet*/
char cipherchar = (adjusted_text + adjusted_key) % (NUM_ALPHA);
/* adjust back to normal ascii (starting at MIN_PRINTABLE) and save to result */
result[idx] = (unsigned int) cipherchar + MIN_PRINTABLE ;
}
void pageable_transfer_execution(int array_size, int threads_per_block, FILE *input_fp, FILE *key_fp) {
/* Calculate the size of the array */
int array_size_in_bytes = (sizeof(unsigned int) * (array_size));
int i = 0;
unsigned int *cpu_text = (unsigned int *) malloc(array_size_in_bytes);
unsigned int *cpu_key = (unsigned int *) malloc(array_size_in_bytes);
unsigned int *cpu_result = (unsigned int *) malloc(array_size_in_bytes);
/* Read characters from the input and key files into the text and key arrays respectively */
// Code left out for brevity sake
/* Declare pointers for the GPU based parameters */
unsigned int *gpu_text;
unsigned int *gpu_key;
unsigned int *gpu_result;
cudaMalloc((void **)&gpu_text, array_size_in_bytes);
cudaMalloc((void **)&gpu_key, array_size_in_bytes);
cudaMalloc((void **)&gpu_result, array_size_in_bytes);
/* Copy the CPU memory to the GPU memory */
cudaMemcpy( gpu_text, cpu_text, array_size_in_bytes, cudaMemcpyHostToDevice);
cudaMemcpy( gpu_key, cpu_key, array_size_in_bytes, cudaMemcpyHostToDevice);
/* Designate the number of blocks and threads */
const unsigned int num_blocks = array_size/threads_per_block;
const unsigned int num_threads = array_size/num_blocks;
/* Execute the encryption kernel and keep track of start and end time for duration */
float duration = 0;
cudaEvent_t start_time = get_time();
encrypt<<<num_blocks, num_threads>>>(gpu_text, gpu_key, gpu_result);
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
cudaEventElapsedTime(&duration, start_time, end_time);
/* Copy the changed GPU memory back to the CPU */
cudaMemcpy( cpu_result, gpu_result, array_size_in_bytes, cudaMemcpyDeviceToHost);
printf("Pageable Transfer- Duration: %fmsn\n", duration);
print_encryption_results(cpu_text, cpu_key, cpu_result, array_size);
/* Free the GPU memory */
// INSERT CODE HERE
/* Free the CPU memory */
// INSERT CODE HERE
}
void pinned_transfer_execution(int array_size, int threads_per_block, FILE *input_fp, FILE *key_fp) {
// Code left out for brevity sake
//pin it
cudaMallocHost((void **)&cpu_text_pinned, array_size_in_bytes);
cudaMallocHost((void **)&cpu_key_pinned, array_size_in_bytes);
cudaMallocHost((void **)&cpu_result_pinned, array_size_in_bytes);
/* Copy the memory over */
// INSERT CODE HERE
/* Declare and allocate pointers for GPU based parameters */
unsigned int *gpu_text;
unsigned int *gpu_key;
unsigned int *gpu_result;
cudaMalloc((void **)&gpu_text, array_size_in_bytes);
cudaMalloc((void **)&gpu_key, array_size_in_bytes);
cudaMalloc((void **)&gpu_result, array_size_in_bytes);
/* Copy the CPU memory to the GPU memory */
cudaMemcpy( gpu_text, cpu_text_pinned, array_size_in_bytes, cudaMemcpyHostToDevice);
cudaMemcpy( gpu_key, cpu_key_pinned, array_size_in_bytes, cudaMemcpyHostToDevice);
/* Designate the number of blocks and threads */
const unsigned int num_blocks = array_size/threads_per_block;
const unsigned int num_threads = array_size/num_blocks;
/* Execute the encryption kernel and keep track of start and end time for duration */
float duration = 0;
cudaEvent_t start_time = get_time();
encrypt<<<num_blocks, num_threads>>>(gpu_text, gpu_key, gpu_result);
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
cudaEventElapsedTime(&duration, start_time, end_time);
/* Copy the changed GPU memory back to the CPU */
cudaMemcpy( cpu_result_pinned, gpu_result, array_size_in_bytes, cudaMemcpyDeviceToHost);
printf("Pinned Transfer- Duration: %fmsn\n", duration);
print_encryption_results(cpu_text_pinned, cpu_key_pinned, cpu_result_pinned, array_size);
/* Free the GPU memory */
cudaFree(gpu_text);
cudaFree(gpu_key);
cudaFree(gpu_result);
/* Free the pinned CPU memory */
cudaFreeHost(cpu_text_pinned);
cudaFreeHost(cpu_key_pinned);
cudaFreeHost(cpu_result_pinned);
/* Free the pageable CPU memory */
// INSERT CODE HERE
}
/**
 * Prints the correct usage of this file
 * @name is the name of the executable (argv[0])
 */
void print_usage(char *name) {
printf("Usage: %s <total_num_threads> <threads_per_block> <input_file> <key_file>\n", name);
}
/**
* Performs simple setup functions before calling the pageable_transfer_execution()
* function.
* Makes sure the files are valid, handles opening and closing of file pointers.
*/
void pageable_transfer(int num_threads, int threads_per_block, char *input_file, char *key_file) {
// Code left out for brevity sake
/* Perform the pageable transfer */
pageable_transfer_execution(num_threads, threads_per_block, input_fp, key_fp);
fclose(input_fp); fclose(key_fp);
}
/**
* Performs setup functions before calling the pinned_transfer_execution()
* function.
* Makes sure the files are valid, handles opening and closing of file pointers.
*/
void pinned_transfer(int num_threads, int threads_per_block, char *input_file, char *key_file) {
// Code left out for brevity sake
/* Perform the pinned transfer */
pinned_transfer_execution(num_threads, threads_per_block, input_fp, key_fp);
fclose(input_fp); fclose(key_fp);
}
/**
* Entry point for execution. Checks command line arguments and
* opens input files, then passes execution to subordinate main_sub()
*/
int main(int argc, char *argv[]) {
/* Check the number of arguments, print usage if wrong
*/
if(argc != 5) {
printf("Error: Incorrect number of command line arguments\n");
print_usage(argv[0]); exit(-1);
}
/* Check the values for num_threads and threads_per_block */
int num_threads = atoi(argv[1]);
int threads_per_block = atoi(argv[2]);
if(num_threads <= 0 || threads_per_block <= 0) {
printf("Error: num_threads and threads_per_block must be integer > 0");
print_usage(argv[0]); exit(-1);
}
if(threads_per_block > num_threads) {
printf("Error: threads per block is greater than number of threads\n");
print_usage(argv[0]);
exit(-1);
}
printf("\n");
/* Perform the pageable transfer */
pageable_transfer(num_threads, threads_per_block, argv[3], argv[4]);
printf("-----------------------------------------------------------------\n");
/* Perform the pinned transfer */
pinned_transfer(num_threads, threads_per_block, argv[3], argv[4]);
return EXIT_SUCCESS;
}
|
171ed869dd025df48cd568648eebf07678ad641c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
__global__ void kernel(float* da,float* db,float* dc,int n)
{
int tx=threadIdx.x;
int ty=threadIdx.y;
float sum=0;
for(int i=0;i<n;i++)
{
sum =sum + da[tx*n+i]*db[i*n+ty];
}
dc[tx*n+ty]=sum;
}
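// Computes dc = da * db for an n x n matrix with one thread per output element inside a single
// block (dim3(n,n)), so this only works while n*n stays within the 1024-thread block limit.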
void init(float* a,int n)
{
for(int i=0;i<n;i++)
{
for(int j=0;j<n;j++)
{
a[i*n+j]=rand()%n+1;
}
}
}
void printm(float *a,int n)
{
for(int i=0;i<n;i++)
{
for(int j=0;j<n;j++)
{
printf(" %f",a[i*n+j]);
}
printf("\n");
}
}
int main()
{
float *a,*b,*c;
float *da,*db,*dc;
int n;
n=3;
a=(float*)malloc(sizeof(float)*n*n);
b=(float*)malloc(sizeof(float)*n*n);
c=(float*)malloc(sizeof(float)*n*n);
init(a,n);
init(b,n);
printm(a,n);
printm(b,n);
hipMalloc(&da,sizeof(float)*n*n);
hipMalloc(&dc,sizeof(float)*n*n);
hipMalloc(&db,sizeof(float)*n*n);
hipMemcpy(da,a,sizeof(float)*n*n,hipMemcpyHostToDevice);
hipMemcpy(db,b,sizeof(float)*n*n,hipMemcpyHostToDevice);
dim3 dimGrid(1,1);
dim3 dimBlock(n,n);
hipLaunchKernelGGL(( kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, da,db,dc,n);
hipMemcpy(c,dc,sizeof(float)*n*n,hipMemcpyDeviceToHost);
printm(c,n);
hipFree(da);
hipFree(db);
hipFree(dc);
free(a);
free(b);
free(c);
return 0;
}
|
171ed869dd025df48cd568648eebf07678ad641c.cu
|
#include<stdio.h>
#include<stdlib.h>
__global__ void kernel(float* da,float* db,float* dc,int n)
{
int tx=threadIdx.x;
int ty=threadIdx.y;
float sum=0;
for(int i=0;i<n;i++)
{
sum =sum + da[tx*n+i]*db[i*n+ty];
}
dc[tx*n+ty]=sum;
}
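// Computes dc = da * db for an n x n matrix with one thread per output element inside a single
// block (dim3(n,n)), so this only works while n*n stays within the 1024-thread block limit.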
void init(float* a,int n)
{
for(int i=0;i<n;i++)
{
for(int j=0;j<n;j++)
{
a[i*n+j]=rand()%n+1;
}
}
}
void printm(float *a,int n)
{
for(int i=0;i<n;i++)
{
for(int j=0;j<n;j++)
{
printf(" %f",a[i*n+j]);
}
printf("\n");
}
}
int main()
{
float *a,*b,*c;
float *da,*db,*dc;
int n;
n=3;
a=(float*)malloc(sizeof(float)*n*n);
b=(float*)malloc(sizeof(float)*n*n);
c=(float*)malloc(sizeof(float)*n*n);
init(a,n);
init(b,n);
printm(a,n);
printm(b,n);
cudaMalloc(&da,sizeof(float)*n*n);
cudaMalloc(&dc,sizeof(float)*n*n);
cudaMalloc(&db,sizeof(float)*n*n);
cudaMemcpy(da,a,sizeof(float)*n*n,cudaMemcpyHostToDevice);
cudaMemcpy(db,b,sizeof(float)*n*n,cudaMemcpyHostToDevice);
dim3 dimGrid(1,1);
dim3 dimBlock(n,n);
kernel<<<dimGrid,dimBlock>>>(da,db,dc,n);
cudaMemcpy(c,dc,sizeof(float)*n*n,cudaMemcpyDeviceToHost);
printm(c,n);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(a);
free(b);
free(c);
return 0;
}
|
8d6f94279a5a46b57747cd29883e78b057890561.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaUtil.h"
__global__
void gridDownSampleKernel(
float* pts,
unsigned short* grid_idxs,
int pt_num,
int pt_stride,
float sample_stride,
float min_x,
float min_y,
float min_z
)
{
int pt_index = threadIdx.y + blockIdx.y*blockDim.y;
if(pt_index>=pt_num)
return;
float x=pts[pt_index*pt_stride];
float y=pts[pt_index*pt_stride+1];
float z=pts[pt_index*pt_stride+2];
grid_idxs[pt_index*3] = floor((x-min_x)/sample_stride);
grid_idxs[pt_index*3+1] = floor((y-min_y)/sample_stride);
grid_idxs[pt_index*3+2] = floor((z-min_z)/sample_stride);
}
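// Host wrapper: copies the point array to the selected GPU, launches one thread per point (blocks of
// 1024 threads along y) to compute each point's integer grid cell index at resolution sample_stride,
// and copies the resulting (ix,iy,iz) indices back into h_grid_idxs.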
void gridDownSampleIdxMap(
float* h_pts,
unsigned short* h_grid_idxs,
int pt_num,
int pt_stride,
float sample_stride,
float min_x,
float min_y,
float min_z,
int gpu_id
)
{
gpuErrchk(hipSetDevice(gpu_id))
int block_num=pt_num/1024;
if(pt_num%1024>0) block_num++;
dim3 block_dim(1,block_num);
dim3 thread_dim(1,1024);
float* d_pts;
unsigned short* d_grid_idxs;
gpuErrchk(hipMalloc((void**)&d_pts, pt_num * pt_stride * sizeof(float)))
gpuErrchk(hipMalloc((void**)&d_grid_idxs, pt_num * 3 * sizeof(unsigned short)))
gpuErrchk(hipMemcpy(d_pts, h_pts, pt_num * pt_stride * sizeof(float), hipMemcpyHostToDevice))
hipLaunchKernelGGL(( gridDownSampleKernel), dim3(block_dim),dim3(thread_dim), 0, 0,
d_pts,d_grid_idxs,pt_num,pt_stride,sample_stride,min_x,min_y,min_z
);
gpuErrchk(hipMemcpy(h_grid_idxs, d_grid_idxs, pt_num * 3 * sizeof(unsigned short), hipMemcpyDeviceToHost))
hipFree(d_pts);
hipFree(d_grid_idxs);
}
|
8d6f94279a5a46b57747cd29883e78b057890561.cu
|
#include "CudaUtil.h"
__global__
void gridDownSampleKernel(
float* pts,
unsigned short* grid_idxs,
int pt_num,
int pt_stride,
float sample_stride,
float min_x,
float min_y,
float min_z
)
{
int pt_index = threadIdx.y + blockIdx.y*blockDim.y;
if(pt_index>=pt_num)
return;
float x=pts[pt_index*pt_stride];
float y=pts[pt_index*pt_stride+1];
float z=pts[pt_index*pt_stride+2];
grid_idxs[pt_index*3] = floor((x-min_x)/sample_stride);
grid_idxs[pt_index*3+1] = floor((y-min_y)/sample_stride);
grid_idxs[pt_index*3+2] = floor((z-min_z)/sample_stride);
}
void gridDownSampleIdxMap(
float* h_pts,
unsigned short* h_grid_idxs,
int pt_num,
int pt_stride,
float sample_stride,
float min_x,
float min_y,
float min_z,
int gpu_id
)
{
gpuErrchk(cudaSetDevice(gpu_id))
int block_num=pt_num/1024;
if(pt_num%1024>0) block_num++;
dim3 block_dim(1,block_num);
dim3 thread_dim(1,1024);
float* d_pts;
unsigned short* d_grid_idxs;
gpuErrchk(cudaMalloc((void**)&d_pts, pt_num * pt_stride * sizeof(float)))
gpuErrchk(cudaMalloc((void**)&d_grid_idxs, pt_num * 3 * sizeof(unsigned short)))
gpuErrchk(cudaMemcpy(d_pts, h_pts, pt_num * pt_stride * sizeof(float), cudaMemcpyHostToDevice))
gridDownSampleKernel<<<block_dim,thread_dim>>>(
d_pts,d_grid_idxs,pt_num,pt_stride,sample_stride,min_x,min_y,min_z
);
gpuErrchk(cudaMemcpy(h_grid_idxs, d_grid_idxs, pt_num * 3 * sizeof(unsigned short), cudaMemcpyDeviceToHost))
cudaFree(d_pts);
cudaFree(d_grid_idxs);
}
|
4516fece79648f9a0c7dfc528ff9d66408033a7c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <set>
#include <map>
#include <iostream>
#include "hash.h"
uint hash(uint32 rc){
uint p = 1867;
//printf("in hash func\n");
return (((rc>>16)& 0xffff ^ ((rc&0xffff) * p)) & 0xffff)%HASHSIZE;
}
uint hash2(uint32 x){
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = (x >> 16) ^ x;
return 1+x%(HASHSIZE-1);
}
hipStream_t * GPUWarmUp(int n_stream)
{
Node *p;
hipMallocManaged(&p,sizeof(Node));
int dev = 0;
hipSetDevice(dev);
hipStream_t *stream =(hipStream_t*)malloc(n_stream*sizeof(hipStream_t));
for(int i=0;i<n_stream;i++)
{
hipStreamCreate(&stream[i]);
}
return stream;
}
bool IsSameChunk(Node p, uint id, uint32 checksum, uint8_t md5[8], std::vector<std::vector<int> > &matchIdVec){
if(p.chunk_id == -1) return false;
if(p.checksum != checksum) return false;
if(memcmp(p.md5, md5, 8) != 0) return false;
matchIdVec[p.chunk_id].push_back(id);
std::cout << "we find a same chunk , it is rare\n";
return true;
}
int insert_hashtable(Node *ht, uint id, uint32 checksum, uint8_t md5[8], std::vector<std::vector<int> > &matchIdVec)
{
uint index = hash(checksum);
uint index2 = hash2(checksum);
uint index3;
for(int j=0;;++j){
index3 = (index + j*index2)%HASHSIZE;
if(ht[index3].chunk_id == -1){
ht[index3].chunk_id = id;
ht[index3].checksum = checksum;
memcpy(ht[index3].md5, md5, 8);
if(id == 46 || id == 104 || id == 115){
printf("id %d, jump %d times, and index is %d\n",id, j,index3);
for(int k=0;k<8;++k){
printf("id %d, chunk md5 %d, ht md5 %d\n", id, md5[k], ht[index3].md5[k]);
}
}
matchIdVec[id].push_back(id);
return 1;
}
else{
if(IsSameChunk(ht[index3], id, checksum, md5, matchIdVec)) return 1;
}
}
}
|
4516fece79648f9a0c7dfc528ff9d66408033a7c.cu
|
#include <set>
#include <map>
#include <iostream>
#include "hash.h"
uint hash(uint32 rc){
uint p = 1867;
//printf("in hash func\n");
return (((rc>>16)& 0xffff ^ ((rc&0xffff) * p)) & 0xffff)%HASHSIZE;
}
uint hash2(uint32 x){
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = (x >> 16) ^ x;
return 1+x%(HASHSIZE-1);
}
cudaStream_t * GPUWarmUp(int n_stream)
{
Node *p;
cudaMallocManaged(&p,sizeof(Node));
int dev = 0;
cudaSetDevice(dev);
cudaStream_t *stream =(cudaStream_t*)malloc(n_stream*sizeof(cudaStream_t));
for(int i=0;i<n_stream;i++)
{
cudaStreamCreate(&stream[i]);
}
return stream;
}
bool IsSameChunk(Node p, uint id, uint32 checksum, uint8_t md5[8], std::vector<std::vector<int> > &matchIdVec){
if(p.chunk_id == -1) return false;
if(p.checksum != checksum) return false;
if(memcmp(p.md5, md5, 8) != 0) return false;
matchIdVec[p.chunk_id].push_back(id);
std::cout << "we find a same chunk , it is rare\n";
return true;
}
int insert_hashtable(Node *ht, uint id, uint32 checksum, uint8_t md5[8], std::vector<std::vector<int> > &matchIdVec)
{
uint index = hash(checksum);
uint index2 = hash2(checksum);
uint index3;
for(int j=0;;++j){
index3 = (index + j*index2)%HASHSIZE;
if(ht[index3].chunk_id == -1){
ht[index3].chunk_id = id;
ht[index3].checksum = checksum;
memcpy(ht[index3].md5, md5, 8);
if(id == 46 || id == 104 || id == 115){
printf("id %d, jump %d times, and index is %d\n",id, j,index3);
for(int k=0;k<8;++k){
printf("id %d, chunk md5 %d, ht md5 %d\n", id, md5[k], ht[index3].md5[k]);
}
}
matchIdVec[id].push_back(id);
return 1;
}
else{
if(IsSameChunk(ht[index3], id, checksum, md5, matchIdVec)) return 1;
}
}
}
|
50fa13c123fa6ff1e151633ef6567a5840a63e1c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matvec.h"
#include "constants.h"
__global__ void vertexToEdgeKernel(double *d_edgMat, double *d_datMat,
int *d_elmVtxMat, int datNum, int elmNum)
{
int elmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( elmIdx < elmNum )
{
int q0Idx = d_elmVtxMat[ elmIdx];
int q1Idx = d_elmVtxMat[ elmNum + elmIdx];
int q2Idx = d_elmVtxMat[2 * elmNum + elmIdx];
vector q0Vec, q1Vec, q2Vec;
getVector(q0Vec, d_datMat, q0Idx, datNum);
getVector(q1Vec, d_datMat, q1Idx, datNum);
getVector(q2Vec, d_datMat, q2Idx, datNum);
vector q10Vec, q20Vec;
vectorSubtract(q10Vec, q1Vec, q0Vec);
vectorSubtract(q20Vec, q2Vec, q0Vec);
setEdge(d_edgMat, q10Vec, q20Vec, elmIdx, elmNum);
}
return;
}
void vertexToEdge(double *d_edgMat, double *d_datMat, int *d_elmVtxMat, int datNum, int elmNum)
{
int blkNum = (elmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( vertexToEdgeKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_edgMat, d_datMat, d_elmVtxMat, datNum, elmNum);
return;
}
|
50fa13c123fa6ff1e151633ef6567a5840a63e1c.cu
|
#include "matvec.h"
#include "constants.h"
__global__ void vertexToEdgeKernel(double *d_edgMat, double *d_datMat,
int *d_elmVtxMat, int datNum, int elmNum)
{
int elmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( elmIdx < elmNum )
{
int q0Idx = d_elmVtxMat[ elmIdx];
int q1Idx = d_elmVtxMat[ elmNum + elmIdx];
int q2Idx = d_elmVtxMat[2 * elmNum + elmIdx];
vector q0Vec, q1Vec, q2Vec;
getVector(q0Vec, d_datMat, q0Idx, datNum);
getVector(q1Vec, d_datMat, q1Idx, datNum);
getVector(q2Vec, d_datMat, q2Idx, datNum);
vector q10Vec, q20Vec;
vectorSubtract(q10Vec, q1Vec, q0Vec);
vectorSubtract(q20Vec, q2Vec, q0Vec);
setEdge(d_edgMat, q10Vec, q20Vec, elmIdx, elmNum);
}
return;
}
void vertexToEdge(double *d_edgMat, double *d_datMat, int *d_elmVtxMat, int datNum, int elmNum)
{
int blkNum = (elmNum - 1) / BLKDIM + 1;
vertexToEdgeKernel <<<blkNum, BLKDIM>>> (d_edgMat, d_datMat, d_elmVtxMat, datNum, elmNum);
return;
}
|
5be6f53768765974cf8450a49165ab1ec830a754.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void bp_output_fc(float *d_output, float *d_preact, float *weight, const int size, const int in_channel, const int out_channel)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int totalPos = blockDim.x * gridDim.x;
const int N = out_channel * in_channel * size * size;
const int weight_channel = out_channel * in_channel;
for (int n = N * pos / totalPos; n < N * (pos+1) / totalPos; ++n) {
int idx = n;
const int i_channel = ((idx /= 1 ) % weight_channel);
const int i_row = ((idx /= weight_channel ) % size);
const int i_col = ((idx /= size ) % size);
atomicAdd(&d_output[((i_channel % in_channel) * size + i_col) * size + i_row], d_preact[i_channel % out_channel] * weight[(i_channel * size + i_col) * size + i_row]);
}
}
|
5be6f53768765974cf8450a49165ab1ec830a754.cu
|
#include "includes.h"
__global__ void bp_output_fc(float *d_output, float *d_preact, float *weight, const int size, const int in_channel, const int out_channel)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int totalPos = blockDim.x * gridDim.x;
const int N = out_channel * in_channel * size * size;
const int weight_channel = out_channel * in_channel;
for (int n = N * pos / totalPos; n < N * (pos+1) / totalPos; ++n) {
int idx = n;
const int i_channel = ((idx /= 1 ) % weight_channel);
const int i_row = ((idx /= weight_channel ) % size);
const int i_col = ((idx /= size ) % size);
atomicAdd(&d_output[((i_channel % in_channel) * size + i_col) * size + i_row], d_preact[i_channel % out_channel] * weight[(i_channel * size + i_col) * size + i_row]);
}
}
|
3ad7f518d53042a5f4d189f2275474bf188eee11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/nms_layer.hpp"
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <iostream>
#define numThreadsPerBlock_1d 16
#define numThreadsPerBlock 256
namespace caffe {
template <typename Dtype>
__global__ void nms_register_kernel(Dtype* src_pointer, int* workspace, int w, int h, Dtype threshold) {
// get pixel location (x,y)
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if( x==0 || x==(w-1) || y==0 || y==(h-1) ){
workspace[y*w + x] = 0;
}
if( x>0 && x<(w-1) && y>0 && y<(h-1) ){
Dtype value = src_pointer[y*w + x];
if(value > threshold){
Dtype top = src_pointer[(y-1)*w + x];
Dtype bottom = src_pointer[(y+1)*w + x];
Dtype left = src_pointer[y*w + (x-1)];
Dtype right = src_pointer[y*w + (x+1)];
Dtype top_left = src_pointer[(y-1)*w + x-1];
Dtype top_right = src_pointer[(y-1)*w + x+1];
Dtype bottom_left = src_pointer[(y+1)*w + x-1];
Dtype bottom_right = src_pointer[(y+1)*w + x+1];
if(value > top && value > bottom && value > left && value > right && value > top_left
&& value > bottom_left && value > bottom_right && value > top_right ){
workspace[y*w + x] = 1;
}
else {
workspace[y*w + x] = 0;
}
}
else {
workspace[y*w + x] = 0;
}
}
}
template <typename Dtype>
__global__ void writeResultKernel(int length, int* input, Dtype* src_pointer, Dtype* output, int width, int max_peaks){
__shared__ int local[numThreadsPerBlock+1]; // one more
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(globalIdx < length){
local[threadIdx.x] = input[globalIdx];
if(threadIdx.x == numThreadsPerBlock - 1 && globalIdx != length - 1){
//last thread in the block but not globally last, load one more
local[threadIdx.x+1] = input[globalIdx+1];
}
__syncthreads();
// see difference, except the globally last one
if(globalIdx != length - 1){
if(local[threadIdx.x] != local[threadIdx.x + 1]) {
//means A[globalIdx] == A[globalIdx + 1] as the input[globalIdx]-th repeat
int peak_index = input[globalIdx]; //0-index
int peak_loc = globalIdx;
int peak_loc_x = peak_loc % width;
int peak_loc_y = peak_loc / width;
if(peak_index < max_peaks){ //limitation
//output[input[globalIdx]] = globalIdx;
if (1) {
float x_acc = 0;
float y_acc = 0;
float score_acc = 0;
int count = 0;
for (int dy=-3;dy<4;dy++) {
if ((peak_loc_y+dy)>0 && (peak_loc_y+dy)<width) {
for (int dx=-3;dx<4;dx++) {
if ((peak_loc_x+dx)>0 && (peak_loc_x+dx)<width) {
float score = src_pointer[(peak_loc_y+dy)*width + peak_loc_x+dx];
float x = peak_loc_x+dx;
float y = peak_loc_y+dy;
if (score>0) {
x_acc += x*score;
y_acc += y*score;
score_acc += score;
count += 1;
}
}
}
}
}
output[(peak_index + 1) * 3] = x_acc/score_acc;
output[(peak_index + 1) * 3 + 1] = y_acc/score_acc;
output[(peak_index + 1) * 3 + 2] = src_pointer[peak_loc_y*width + peak_loc_x];
} else {
output[(peak_index + 1) * 3] = peak_loc_x;
output[(peak_index + 1) * 3 + 1] = peak_loc_y;
output[(peak_index + 1) * 3 + 2] = src_pointer[peak_loc_y*width + peak_loc_x];
}
}
}
}
else {
output[0] = input[globalIdx]; //number of peaks
}
}
}
template <typename Dtype>
void NmsLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
//Forward_cpu(bottom, top);
int num = bottom[0]->shape(0);
//int channel = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
int offset = height * width;
int offset_dst = (max_peaks_+1)*3;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(width, threadsPerBlock.x), updiv(height, threadsPerBlock.y));
//std::cout << "channel: " << channel << std::endl;
for(int n = 0; n < num; n++){ // batch
for(int c = 0; c < num_parts_; c++){
//std::cout << "channel: " << c << std::endl;
int* w_pointer1 = workspace.mutable_gpu_data() + n * num_parts_ * offset + c * offset;
Dtype* src = bottom[0]->mutable_gpu_data() + n * num_parts_ * offset + c * offset;
Dtype* dst = top[0]->mutable_gpu_data() + n * num_parts_ * offset_dst + c * offset_dst;
// old model
// if(c==14){
// Dtype* src = bottom[0]->mutable_gpu_data() + n * parts_num * offset + 28 * offset;
// }
hipLaunchKernelGGL(( nms_register_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, src, w_pointer1, width, height, threshold_);//[0,0,0,0,1,0,0,0,0,1,0,0,0,0]
//LOG(ERROR) << "register done";
thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(w_pointer1);
//LOG(ERROR) << "pointer done";
//debug
// if(c==3){
// char filename[50];
// sprintf(filename, "work%02d.txt", c);
// std::ofstream fout(filename);
// int* w_pointer1_local = workspace.mutable_cpu_data() + n * parts_num * offset + c * offset;
// for(int y = 0; y < height; y++){
// for(int x = 0; x < width; x++){
// fout << w_pointer1_local[y*width + x] << "\t";
// }
// fout<< std::endl;
// }
// fout.close();
// }
thrust::exclusive_scan(dev_ptr, dev_ptr + offset, dev_ptr); //[0,0,0,0,0,1,1,1,1,1,2,2,2,2]
//LOG(ERROR) << "thrust done";
hipLaunchKernelGGL(( writeResultKernel), dim3(updiv(offset,numThreadsPerBlock)), dim3(numThreadsPerBlock), 0, 0, offset, w_pointer1, src, dst, width, max_peaks_);
//LOG(ERROR) << "write done";
}
}
//w_pointer
}
template <typename Dtype>
void NmsLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
NOT_IMPLEMENTED;
}
INSTANTIATE_LAYER_GPU_FUNCS(NmsLayer);
} // namespace caffe
|
3ad7f518d53042a5f4d189f2275474bf188eee11.cu
|
#include "caffe/layers/nms_layer.hpp"
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <iostream>
#define numThreadsPerBlock_1d 16
#define numThreadsPerBlock 256
namespace caffe {
template <typename Dtype>
__global__ void nms_register_kernel(Dtype* src_pointer, int* workspace, int w, int h, Dtype threshold) {
// get pixel location (x,y)
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if( x==0 || x==(w-1) || y==0 || y==(h-1) ){
workspace[y*w + x] = 0;
}
if( x>0 && x<(w-1) && y>0 && y<(h-1) ){
Dtype value = src_pointer[y*w + x];
if(value > threshold){
Dtype top = src_pointer[(y-1)*w + x];
Dtype bottom = src_pointer[(y+1)*w + x];
Dtype left = src_pointer[y*w + (x-1)];
Dtype right = src_pointer[y*w + (x+1)];
Dtype top_left = src_pointer[(y-1)*w + x-1];
Dtype top_right = src_pointer[(y-1)*w + x+1];
Dtype bottom_left = src_pointer[(y+1)*w + x-1];
Dtype bottom_right = src_pointer[(y+1)*w + x+1];
if(value > top && value > bottom && value > left && value > right && value > top_left
&& value > bottom_left && value > bottom_right && value > top_right ){
workspace[y*w + x] = 1;
}
else {
workspace[y*w + x] = 0;
}
}
else {
workspace[y*w + x] = 0;
}
}
}
template <typename Dtype>
__global__ void writeResultKernel(int length, int* input, Dtype* src_pointer, Dtype* output, int width, int max_peaks){
__shared__ int local[numThreadsPerBlock+1]; // one more
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(globalIdx < length){
local[threadIdx.x] = input[globalIdx];
if(threadIdx.x == numThreadsPerBlock - 1 && globalIdx != length - 1){
//last thread in the block but not globally last, load one more
local[threadIdx.x+1] = input[globalIdx+1];
}
__syncthreads();
// see difference, except the globally last one
if(globalIdx != length - 1){
if(local[threadIdx.x] != local[threadIdx.x + 1]) {
//means A[globalIdx] == A[globalIdx + 1] as the input[globalIdx]-th repeat
int peak_index = input[globalIdx]; //0-index
int peak_loc = globalIdx;
int peak_loc_x = peak_loc % width;
int peak_loc_y = peak_loc / width;
if(peak_index < max_peaks){ //limitation
//output[input[globalIdx]] = globalIdx;
if (1) {
float x_acc = 0;
float y_acc = 0;
float score_acc = 0;
int count = 0;
for (int dy=-3;dy<4;dy++) {
if ((peak_loc_y+dy)>0 && (peak_loc_y+dy)<width) {
for (int dx=-3;dx<4;dx++) {
if ((peak_loc_x+dx)>0 && (peak_loc_x+dx)<width) {
float score = src_pointer[(peak_loc_y+dy)*width + peak_loc_x+dx];
float x = peak_loc_x+dx;
float y = peak_loc_y+dy;
if (score>0) {
x_acc += x*score;
y_acc += y*score;
score_acc += score;
count += 1;
}
}
}
}
}
output[(peak_index + 1) * 3] = x_acc/score_acc;
output[(peak_index + 1) * 3 + 1] = y_acc/score_acc;
output[(peak_index + 1) * 3 + 2] = src_pointer[peak_loc_y*width + peak_loc_x];
} else {
output[(peak_index + 1) * 3] = peak_loc_x;
output[(peak_index + 1) * 3 + 1] = peak_loc_y;
output[(peak_index + 1) * 3 + 2] = src_pointer[peak_loc_y*width + peak_loc_x];
}
}
}
}
else {
output[0] = input[globalIdx]; //number of peaks
}
}
}
template <typename Dtype>
void NmsLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
//Forward_cpu(bottom, top);
int num = bottom[0]->shape(0);
//int channel = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
int offset = height * width;
int offset_dst = (max_peaks_+1)*3;
dim3 threadsPerBlock(numThreadsPerBlock_1d, numThreadsPerBlock_1d);
dim3 numBlocks(updiv(width, threadsPerBlock.x), updiv(height, threadsPerBlock.y));
//std::cout << "channel: " << channel << std::endl;
for(int n = 0; n < num; n++){ // batch
for(int c = 0; c < num_parts_; c++){
//std::cout << "channel: " << c << std::endl;
int* w_pointer1 = workspace.mutable_gpu_data() + n * num_parts_ * offset + c * offset;
Dtype* src = bottom[0]->mutable_gpu_data() + n * num_parts_ * offset + c * offset;
Dtype* dst = top[0]->mutable_gpu_data() + n * num_parts_ * offset_dst + c * offset_dst;
// old model
// if(c==14){
// Dtype* src = bottom[0]->mutable_gpu_data() + n * parts_num * offset + 28 * offset;
// }
nms_register_kernel<<<numBlocks, threadsPerBlock>>>(src, w_pointer1, width, height, threshold_);//[0,0,0,0,1,0,0,0,0,1,0,0,0,0]
//LOG(ERROR) << "register done";
thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(w_pointer1);
//LOG(ERROR) << "pointer done";
//debug
// if(c==3){
// char filename[50];
// sprintf(filename, "work%02d.txt", c);
// std::ofstream fout(filename);
// int* w_pointer1_local = workspace.mutable_cpu_data() + n * parts_num * offset + c * offset;
// for(int y = 0; y < height; y++){
// for(int x = 0; x < width; x++){
// fout << w_pointer1_local[y*width + x] << "\t";
// }
// fout<< std::endl;
// }
// fout.close();
// }
thrust::exclusive_scan(dev_ptr, dev_ptr + offset, dev_ptr); //[0,0,0,0,0,1,1,1,1,1,2,2,2,2]
//LOG(ERROR) << "thrust done";
writeResultKernel<<<updiv(offset,numThreadsPerBlock), numThreadsPerBlock>>>(offset, w_pointer1, src, dst, width, max_peaks_);
//LOG(ERROR) << "write done";
}
}
//w_pointer
}
template <typename Dtype>
void NmsLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
NOT_IMPLEMENTED;
}
INSTANTIATE_LAYER_GPU_FUNCS(NmsLayer);
} // namespace caffe
|
114f23bff8a085341cec518c8ddd54548c77fe2c.hip
|
// !!! This is a file automatically generated by hipify!!!
// Andrew Gloster
// February 2019
// Program to solve the 2D Cahn-Hilliard equation on a periodic domain using the ADI method
// Copyright 2019 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include <rocblas.h>
#include "hdf5.h"
#include <time.h>
// ---------------------------------------------------------------------
// Programmer Libraries and Headers
// ---------------------------------------------------------------------
#include "../../cuSten/cuSten.h"
#include "cuPentBatch.h"
#include "BatchHyper.h"
// ---------------------------------------------------------------------
// MACROS
// ---------------------------------------------------------------------
// Block sizes for finding RHS
#define BLOCK_X_FUN 8
#define BLOCK_Y_FUN 8
#define BLOCK_X 32
#define BLOCK_Y 32
// Block size for inverting
#define BLOCK_INV 64
//---------------------------------------------------------------------
// Static functions for use in main program
//---------------------------------------------------------------------
// Find cBar for differencing
__global__ static void findCBar(double* cOld, double* cCurr, double* cBar, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
// Find cBar
cBar[index] = 2.0 * cCurr[index] - cOld[index];
}
// Find the full combined RHS
__global__ static void findRHS(double* cOld, double* cCurr, double* cHalf, double* cNonLinRHS, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
// Set the RHS for inversion
cHalf[index] += - (2.0 / 3.0) * (cCurr[index] - cOld[index]) + cNonLinRHS[index];
// Set cOld to cCurr
cOld[index] = cCurr[index];
}
// Recover the updated timestep
__global__ static void findNew(double* cCurr, double* cBar, double* cHalf, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
// Recover the new data
cCurr[index] = cBar[index] + cHalf[index];
}
// Print out to hdf5
static herr_t Print_Out(double* data, double time, int nx, int ny){
char str_num[1024];
char str_file[2048];
snprintf(str_num, 20, "%0.10lf", time);
snprintf(str_file, 100, "%s%s%s", "output/cahn_hilliard_", str_num, ".nc");
// Print out matrix
hsize_t dims[2];
hid_t file_id, dataset_id, dataspace_id;
herr_t status;
file_id = H5Fcreate(str_file, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dims[0] = nx;
dims[1] = ny;
dataspace_id = H5Screate_simple(2, dims, NULL);
dataset_id = H5Dcreate2(file_id, "/c", H5T_NATIVE_DOUBLE, dataspace_id,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
status = H5Dwrite(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
/* End access to the dataset and release resources used by it. */
status = H5Dclose(dataset_id);
/* Terminate access to the data space. */
status = H5Sclose(dataspace_id);
/* Close the file. */
status = H5Fclose(file_id);
return status;
}
static double double_rand(double min, double max)
{
double scale = (double) rand() / (double) RAND_MAX; /* [0, 1.0] */
return min + scale * ( max - min ); /* [min, max] */
}
//---------------------------------------------------------------------
// Function to calculate the non linear RHS
//---------------------------------------------------------------------
/*! \var typedef double (*devArg1XY)(double*, double*, int, int, int, int);
\brief The function pointer containing the user defined function to be applied <br>
Input 1: The pointer to input data to the function <br>
Input 2: The pointer to the coefficients provided by the user <br>
Input 3: The current index position (centre of the stencil to be applied) <br>
Input 4: Value to be used to jump between rows. (j + 1, j - 1 etc.) <br>
Input 5: Size of stencil in x direction <br>
Input 6: Size of stencil in y direction
*/
typedef double (*devArg1XY)(double*, double*, int, int, int, int);
__inline__ __device__ double nonLinRHS(double* data, double* coe, int loc, int jump, int nx, int ny)
{
double result = 0.0;
double current;
int temp;
int count = 0;
#pragma unroll
for (int j = 0; j < ny; j++)
{
temp = loc + j * jump;
#pragma unroll
for (int i = 0; i < nx; i++)
{
current = data[temp + i];
result += coe[count] * ((current * current * current) - current);
count ++;
}
}
return result;
}
__device__ devArg1XY devFunc = nonLinRHS;
// ---------------------------------------------------------------------
// Begin main program
// ---------------------------------------------------------------------
int main()
{
//----------------------------------------
// Simulation parameters
//----------------------------------------
// Set coefficients
double D = 1.0;
double gamma = 0.01;
// Set grid spacing -- Use a square grid -- thus all nx = ny
int nx = 512;
// Set the size of the reduced matrix
int size = nx - 2;
// Set timing
double T = 100.0;
// Domain size
double lx = 2.0 * M_PI;
// Spacings
double dx = lx / nx;
double dt = 0.1 * dx;
// Buffer used for error checking
char msgStringBuffer[1024];
// How often to output
int print = 100;
// What device to compute on
int computeDevice = 0;
//----------------------------------------
// Set up GPU grids
//----------------------------------------
// Set for inversion
int gridInv = (nx % BLOCK_INV == 0) ? (nx / BLOCK_INV) : (nx / BLOCK_INV + 1);
dim3 blockDimInv(BLOCK_INV);
dim3 gridDimInv(gridInv);
// Set for any standard grid
int xGrid = (nx % BLOCK_X == 0) ? (nx / BLOCK_X) : (nx / BLOCK_X + 1);
int yGrid = (nx % BLOCK_Y == 0) ? (nx / BLOCK_Y) : (nx / BLOCK_Y + 1);
dim3 blockDim(BLOCK_X, BLOCK_Y);
dim3 gridDim(xGrid, yGrid);
//----------------------------------------
// Memory allocation
//----------------------------------------
// Old timestep
double* cOld;
hipMallocManaged(&cOld, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cOld");
checkError(msgStringBuffer);
// Current timestep
double* cCurr;
hipMallocManaged(&cCurr, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cCurr");
checkError(msgStringBuffer);
// New timestep
double* cNonLinRHS;
hipMallocManaged(&cNonLinRHS, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cNonLinRHS");
checkError(msgStringBuffer);
// Intermediate step
double* cBar;
hipMallocManaged(&cBar, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cBar");
checkError(msgStringBuffer);
// Intermediate step
double* cHalf;
hipMallocManaged(&cHalf, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cBar");
checkError(msgStringBuffer);
//----------------------------------------
// Initial Condition
//----------------------------------------
// Indexing
int temp, index;
for (int j = 0; j < nx; j++)
{
temp = j * nx;
for (int i = 0; i < nx; i++)
{
index = temp + i;
cOld[index] = double_rand(- 0.1, 0.1);
cCurr[index] = cOld[index];
}
}
//----------------------------------------
// Allocate the memory for the LHS
//----------------------------------------
// Lowest diagonal
double* ds;
hipMallocManaged(&ds, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for ds");
checkError(msgStringBuffer);
// Lower diagonal
double* dl;
hipMallocManaged(&dl, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for dl");
checkError(msgStringBuffer);
// Main diagonal
double* diag;
hipMallocManaged(&diag, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for diag");
checkError(msgStringBuffer);
// Upper diagonal
double* du;
hipMallocManaged(&du, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for du");
checkError(msgStringBuffer);
// Highest diagonal
double* dw;
hipMallocManaged(&dw, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for dw");
checkError(msgStringBuffer);
//----------------------------------------
// Set up cuBLAS
//----------------------------------------
// Set a handle
hipblasHandle_t handleBLAS;
// Set a status
hipblasStatus_t statusBLAS;
// Create the handle
statusBLAS = hipblasCreate(&handleBLAS);
// Set constants
const double alpha = 1.0;
const double beta = 0.0;
//----------------------------------------
// Set coefficients
//----------------------------------------
// Linear coefficient
double simgaLin = 2.0 * dt * D * gamma / (3.0 * (pow(dx, 4.0)));
// Set the diagonal elements
double a = simgaLin;
double b = - 4 * simgaLin;
double c = 1 + 6 * simgaLin;
double d = - 4 * simgaLin;
double e = simgaLin;
//----------------------------------------
// Set the matrix
//----------------------------------------
// Set the LHS for inversion
hipLaunchKernelGGL(( setMultiLHS), dim3(gridDim), dim3(blockDim), 0, 0, ds, dl, diag, du, dw, a, b, c, d, e, size, nx);
sprintf(msgStringBuffer, "Failed to set LHS matrix for initial timestep");
checkError(msgStringBuffer);
// Ensure matrix is set
hipDeviceSynchronize();
// Pre-factor the LHS
hipLaunchKernelGGL(( pentFactorBatch), dim3(gridDimInv), dim3(blockDimInv), 0, 0, ds, dl, diag, du, dw, size, nx);
sprintf(msgStringBuffer, "Failed to pre factor LHS matrix for initial timestep");
checkError(msgStringBuffer);
// Ensure matrix is factorised
hipDeviceSynchronize();
//----------------------------------------
// Find omega and set inverses
//----------------------------------------
double* omega = (double*)malloc(4 * sizeof(double));
if (omega == NULL)
{
printf("%s \n", "Failed to malloc omega");
}
double* inv1Single = (double*)malloc(size * sizeof(double));
if (inv1Single == NULL)
{
printf("%s \n", "Failed to malloc inv1Single");
}
double* inv2Single = (double*)malloc(size * sizeof(double));
if (inv2Single == NULL)
{
printf("%s \n", "Failed to malloc inv2Single");
}
double* inv1Multi;
hipMallocManaged(&inv1Multi, nx * size * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for inv1Multi");
checkError(msgStringBuffer);
double* inv2Multi;
hipMallocManaged(&inv2Multi, nx * size * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for inv2Multi");
checkError(msgStringBuffer);
findOmega(omega, inv1Single, inv2Single, a, b, c, d, e, nx);
for (int j = 0; j < size; j++)
{
temp = j * nx;
for (int i = 0; i < nx; i++)
{
index = temp + i;
inv1Multi[index] = inv1Single[j];
inv2Multi[index] = inv2Single[j];
}
}
//----------------------------------------
// Set compute for linear RHS
//----------------------------------------
int linHoriz = 5;
int linLeft = 2;
int linRight = 2;
int linVert = 5;
int linTop = 2;
int linBottom = 2;
double* weightsLinRHS;
hipMallocManaged(&weightsLinRHS, linHoriz * linVert * sizeof(double));
weightsLinRHS[0] = 0.0; weightsLinRHS[1] = 0.0; weightsLinRHS[2] = - 1.0 * simgaLin; weightsLinRHS[3] = 0.0; weightsLinRHS[4] = 0.0;
weightsLinRHS[5] = 0.0; weightsLinRHS[6] = - 2.0 * simgaLin; weightsLinRHS[7] = 8.0 * simgaLin; weightsLinRHS[8] = - 2.0 * simgaLin; weightsLinRHS[9] = 0.0;
weightsLinRHS[10] = - 1.0 * simgaLin; weightsLinRHS[11] = 8.0 * simgaLin; weightsLinRHS[12] = - 20.0 * simgaLin; weightsLinRHS[13] = 8.0 * simgaLin; weightsLinRHS[14] = - 1.0 * simgaLin;
weightsLinRHS[15] = 0.0; weightsLinRHS[16] = - 2.0 * simgaLin; weightsLinRHS[17] = 8.0 * simgaLin; weightsLinRHS[18] = - 2.0 * simgaLin; weightsLinRHS[19] = 0.0;
weightsLinRHS[20] = 0.0; weightsLinRHS[21] = 0.0; weightsLinRHS[22] = -1.0 * simgaLin; weightsLinRHS[23] = 0.0; weightsLinRHS[24] = 0.0;
// Set up the compute device structs
cuSten_t linRHS;
// Set the number of tiles
int linInitTiles = 1;
// Initialise the instance of the stencil
cuStenCreate2DXYp(&linRHS, computeDevice, linInitTiles, nx, nx, BLOCK_X, BLOCK_Y, cHalf, cBar, weightsLinRHS, linHoriz, linLeft, linRight, linVert, linTop, linBottom);
// Ensure compute type created
hipDeviceSynchronize();
//----------------------------------------
// Set up computation of non-linear RHS
//----------------------------------------
// Set up the compute device structs
cuSten_t nonLinCompute;
// Synchronise to ensure everything initialised
hipDeviceSynchronize();
// Copy the function to device memory
double* func;
hipMemcpyFromSymbol(&func, devFunc, sizeof(devArg1XY));
// Set new non linear coefficient
double sigmaNonLin = (dt / 3.0) * D * (2.0 / pow(dx, 2.0));
int numStenHoriz = 3;
int numStenLeft = 1;
int numStenRight = 1;
int numStenVert = 3;
int numStenTop = 1;
int numStenBottom = 1;
int nonLinTiles = 1;
double* coe;
hipMallocManaged(&coe, numStenHoriz * numStenVert * sizeof(double));
coe[0] = 0.0; coe[1] = 1.0 * sigmaNonLin; coe[2] = 0.0;
coe[3] = 1.0 * sigmaNonLin; coe[4] = - 4.0 * sigmaNonLin; coe[5] = 1.0 * sigmaNonLin;
coe[6] = 0.0; coe[7] = 1.0 * sigmaNonLin; coe[8] = 0.0;
// Initialise the instance of the stencil
cuStenCreate2DXYpFun(&nonLinCompute, computeDevice, nonLinTiles, nx, nx, BLOCK_X_FUN, BLOCK_Y_FUN, cNonLinRHS, cCurr, coe, numStenHoriz, numStenLeft, numStenRight, numStenVert, numStenTop, numStenBottom, func);
// Synchronise to ensure everything initialised
hipDeviceSynchronize();
//----------------------------------------
// Begin timestepping
//----------------------------------------
double time = dt;
int timeCount = 1;
while (time < T)
{
// Set cBar
hipLaunchKernelGGL(( findCBar), dim3(gridDim), dim3(blockDim), 0, 0, cOld, cCurr, cBar, nx);
// Ensure compute type created
hipDeviceSynchronize();
// Compute the non-linear RHS
cuStenCompute2DXYpFun(&nonLinCompute, 0);
// Compute the linear RHS
cuStenCompute2DXYp(&linRHS, 0);
// Ensure compute type created
hipDeviceSynchronize();
// Find the full RHS and then set cOld to cCurrent
hipLaunchKernelGGL(( findRHS), dim3(gridDim), dim3(blockDim), 0, 0, cOld, cCurr, cHalf, cNonLinRHS, nx);
// Ensure compute type created
hipDeviceSynchronize();
// Transpose the result
statusBLAS = hipblasDgeam(handleBLAS, HIPBLAS_OP_T, HIPBLAS_OP_T, nx, nx, &alpha, cHalf, nx, &beta, NULL, nx, cCurr, nx);
if (statusBLAS != HIPBLAS_STATUS_SUCCESS) {
printf("Unable to compute transpose \n");
return EXIT_FAILURE;
}
// Ensure transpose completed
hipDeviceSynchronize();
// Invert the matrix
cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cCurr, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx);
// Transpose the result
statusBLAS = hipblasDgeam(handleBLAS, HIPBLAS_OP_T, HIPBLAS_OP_T, nx, nx, &alpha, cCurr, nx, &beta, NULL, nx, cHalf, nx);
if (statusBLAS != HIPBLAS_STATUS_SUCCESS) {
printf("Unable to compute transpose \n");
return EXIT_FAILURE;
}
// Ensure transpose completed
hipDeviceSynchronize();
// Invert the matrix
cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cHalf, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx);
// Ensure computation completed
hipDeviceSynchronize();
hipLaunchKernelGGL(( findNew), dim3(gridDim), dim3(blockDim), 0, 0, cCurr, cBar, cHalf, nx);
// Ensure computation completed
hipDeviceSynchronize();
// Add on the next time
time += dt;
timeCount += 1;
printf("%lf \n", time);
if (timeCount % print == 0)
{
Print_Out(cCurr, time, nx, nx);
}
}
// Ensure computation completed
hipDeviceSynchronize();
Print_Out(cCurr, time, nx, nx);
//----------------------------------------
// Free memory at the end
//----------------------------------------
free(omega);
free(inv1Single);
free(inv2Single);
cuStenDestroy2DXYp(&linRHS);
cuStenDestroy2DXYpFun(&nonLinCompute);
hipFree(inv1Multi);
hipFree(inv2Multi);
hipFree(cOld);
hipFree(cCurr);
hipFree(cNonLinRHS);
hipFree(cBar);
hipFree(cHalf);
hipFree(ds);
hipFree(dl);
hipFree(diag);
hipFree(du);
hipFree(dw);
}
// ---------------------------------------------------------------------
// End main program
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// End of File
// ---------------------------------------------------------------------
|
114f23bff8a085341cec518c8ddd54548c77fe2c.cu
|
// Andrew Gloster
// February 2019
// Program to solve the 2D Cahn-Hilliard equation on a periodic domain using the ADI method
// Copyright 2019 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
#include <cublas_v2.h>
#include "hdf5.h"
#include <time.h>
// ---------------------------------------------------------------------
// Programmer Libraries and Headers
// ---------------------------------------------------------------------
#include "../../cuSten/cuSten.h"
#include "cuPentBatch.h"
#include "BatchHyper.h"
// ---------------------------------------------------------------------
// MACROS
// ---------------------------------------------------------------------
// Block sizes for finding RHS
#define BLOCK_X_FUN 8
#define BLOCK_Y_FUN 8
#define BLOCK_X 32
#define BLOCK_Y 32
// Block size for inverting
#define BLOCK_INV 64
//---------------------------------------------------------------------
// Static functions for use in main program
//---------------------------------------------------------------------
// Find cBar for differencing
__global__ static void findCBar(double* cOld, double* cCurr, double* cBar, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
// Find cBar
cBar[index] = 2.0 * cCurr[index] - cOld[index];
}
// Find the full combined RHS
__global__ static void findRHS(double* cOld, double* cCurr, double* cHalf, double* cNonLinRHS, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
// Set the RHS for inversion
cHalf[index] += - (2.0 / 3.0) * (cCurr[index] - cOld[index]) + cNonLinRHS[index];
// Set cOld to cCurr
cOld[index] = cCurr[index];
}
// Recover the updated timestep
__global__ static void findNew(double* cCurr, double* cBar, double* cHalf, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
// Recover the new data
cCurr[index] = cBar[index] + cHalf[index];
}
// Print out to hdf5
static herr_t Print_Out(double* data, double time, int nx, int ny){
char str_num[1024];
char str_file[2048];
snprintf(str_num, 20, "%0.10lf", time);
snprintf(str_file, 100, "%s%s%s", "output/cahn_hilliard_", str_num, ".nc");
// Print out matrix
hsize_t dims[2];
hid_t file_id, dataset_id, dataspace_id;
herr_t status;
file_id = H5Fcreate(str_file, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dims[0] = nx;
dims[1] = ny;
dataspace_id = H5Screate_simple(2, dims, NULL);
dataset_id = H5Dcreate2(file_id, "/c", H5T_NATIVE_DOUBLE, dataspace_id,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
status = H5Dwrite(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
/* End access to the dataset and release resources used by it. */
status = H5Dclose(dataset_id);
/* Terminate access to the data space. */
status = H5Sclose(dataspace_id);
/* Close the file. */
status = H5Fclose(file_id);
return status;
}
static double double_rand(double min, double max)
{
double scale = (double) rand() / (double) RAND_MAX; /* [0, 1.0] */
return min + scale * ( max - min ); /* [min, max] */
}
//---------------------------------------------------------------------
// Function to calculate the non linear RHS
//---------------------------------------------------------------------
/*! \var typedef double (*devArg1XY)(double*, double*, int, int, int, int);
\brief The function pointer containing the user defined function to be applied <br>
Input 1: The pointer to input data to the function <br>
Input 2: The pointer to the coefficients provided by the user <br>
Input 3: The current index position (centre of the stencil to be applied) <br>
Input 4: Value to be used to jump between rows. (j + 1, j - 1 etc.) <br>
Input 5: Size of stencil in x direction <br>
Input 6: Size of stencil in y direction
*/
typedef double (*devArg1XY)(double*, double*, int, int, int, int);
__inline__ __device__ double nonLinRHS(double* data, double* coe, int loc, int jump, int nx, int ny)
{
double result = 0.0;
double current;
int temp;
int count = 0;
#pragma unroll
for (int j = 0; j < ny; j++)
{
temp = loc + j * jump;
#pragma unroll
for (int i = 0; i < nx; i++)
{
current = data[temp + i];
result += coe[count] * ((current * current * current) - current);
count ++;
}
}
return result;
}
__device__ devArg1XY devFunc = nonLinRHS;
// ---------------------------------------------------------------------
// Begin main program
// ---------------------------------------------------------------------
int main()
{
//----------------------------------------
// Simulation parameters
//----------------------------------------
// Set coefficients
double D = 1.0;
double gamma = 0.01;
// Set grid spacing -- Use a square grid -- thus all nx = ny
int nx = 512;
// Set the size of the reduced matrix
int size = nx - 2;
// Set timing
double T = 100.0;
// Domain size
double lx = 2.0 * M_PI;
// Spacings
double dx = lx / nx;
double dt = 0.1 * dx;
// Buffer used for error checking
char msgStringBuffer[1024];
// How often to output
int print = 100;
// What device to compute on
int computeDevice = 0;
//----------------------------------------
// Set up GPU grids
//----------------------------------------
// Set for inversion
int gridInv = (nx % BLOCK_INV == 0) ? (nx / BLOCK_INV) : (nx / BLOCK_INV + 1);
dim3 blockDimInv(BLOCK_INV);
dim3 gridDimInv(gridInv);
// Set for any standard grid
int xGrid = (nx % BLOCK_X == 0) ? (nx / BLOCK_X) : (nx / BLOCK_X + 1);
int yGrid = (nx % BLOCK_Y == 0) ? (nx / BLOCK_Y) : (nx / BLOCK_Y + 1);
dim3 blockDim(BLOCK_X, BLOCK_Y);
dim3 gridDim(xGrid, yGrid);
//----------------------------------------
// Memory allocation
//----------------------------------------
// Old timestep
double* cOld;
cudaMallocManaged(&cOld, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cOld");
checkError(msgStringBuffer);
// Current timestep
double* cCurr;
cudaMallocManaged(&cCurr, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cCurr");
checkError(msgStringBuffer);
// New timestep
double* cNonLinRHS;
cudaMallocManaged(&cNonLinRHS, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cNonLinRHS");
checkError(msgStringBuffer);
// Intermediate step
double* cBar;
cudaMallocManaged(&cBar, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cBar");
checkError(msgStringBuffer);
// Intermediate step
double* cHalf;
cudaMallocManaged(&cHalf, nx * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for cBar");
checkError(msgStringBuffer);
//----------------------------------------
// Initial Condition
//----------------------------------------
// Indexing
int temp, index;
for (int j = 0; j < nx; j++)
{
temp = j * nx;
for (int i = 0; i < nx; i++)
{
index = temp + i;
cOld[index] = double_rand(- 0.1, 0.1);
cCurr[index] = cOld[index];
}
}
//----------------------------------------
// Allocate the memory for the LHS
//----------------------------------------
// Lowest diagonal
double* ds;
cudaMallocManaged(&ds, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for ds");
checkError(msgStringBuffer);
// Lower diagonal
double* dl;
cudaMallocManaged(&dl, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for dl");
checkError(msgStringBuffer);
// Main diagonal
double* diag;
cudaMallocManaged(&diag, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for diag");
checkError(msgStringBuffer);
// Upper diagonal
double* du;
cudaMallocManaged(&du, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for du");
checkError(msgStringBuffer);
// Highest diagonal
double* dw;
cudaMallocManaged(&dw, size * nx * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for dw");
checkError(msgStringBuffer);
//----------------------------------------
// Set up cuBLAS
//----------------------------------------
// Set a handle
cublasHandle_t handleBLAS;
// Set a status
cublasStatus_t statusBLAS;
// Create the handle
statusBLAS = cublasCreate(&handleBLAS);
// Set constants
const double alpha = 1.0;
const double beta = 0.0;
//----------------------------------------
// Set coefficients
//----------------------------------------
// Linear coefficient
double simgaLin = 2.0 * dt * D * gamma / (3.0 * (pow(dx, 4.0)));
// Set the diagonal elements
double a = simgaLin;
double b = - 4 * simgaLin;
double c = 1 + 6 * simgaLin;
double d = - 4 * simgaLin;
double e = simgaLin;
//----------------------------------------
// Set the matrix
//----------------------------------------
// Set the LHS for inversion
setMultiLHS<<<gridDim, blockDim>>>(ds, dl, diag, du, dw, a, b, c, d, e, size, nx);
sprintf(msgStringBuffer, "Failed to set LHS matrix for initial timestep");
checkError(msgStringBuffer);
// Ensure matrix is set
cudaDeviceSynchronize();
// Pre-factor the LHS
pentFactorBatch<<<gridDimInv, blockDimInv>>>(ds, dl, diag, du, dw, size, nx);
sprintf(msgStringBuffer, "Failed to pre factor LHS matrix for initial timestep");
checkError(msgStringBuffer);
// Ensure matrix is factorised
cudaDeviceSynchronize();
//----------------------------------------
// Find omega and set inverses
//----------------------------------------
double* omega = (double*)malloc(4 * sizeof(double));
if (omega == NULL)
{
printf("%s \n", "Failed to malloc omega");
}
double* inv1Single = (double*)malloc(size * sizeof(double));
if (inv1Single == NULL)
{
printf("%s \n", "Failed to malloc inv1Single");
}
double* inv2Single = (double*)malloc(size * sizeof(double));
if (inv2Single == NULL)
{
printf("%s \n", "Failed to malloc inv2Single");
}
double* inv1Multi;
cudaMallocManaged(&inv1Multi, nx * size * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for inv1Multi");
checkError(msgStringBuffer);
double* inv2Multi;
cudaMallocManaged(&inv2Multi, nx * size * sizeof(double));
sprintf(msgStringBuffer, "Failed to allocate memory for inv2Multi");
checkError(msgStringBuffer);
findOmega(omega, inv1Single, inv2Single, a, b, c, d, e, nx);
for (int j = 0; j < size; j++)
{
temp = j * nx;
for (int i = 0; i < nx; i++)
{
index = temp + i;
inv1Multi[index] = inv1Single[j];
inv2Multi[index] = inv2Single[j];
}
}
//----------------------------------------
// Set compute for linear RHS
//----------------------------------------
int linHoriz = 5;
int linLeft = 2;
int linRight = 2;
int linVert = 5;
int linTop = 2;
int linBottom = 2;
double* weightsLinRHS;
cudaMallocManaged(&weightsLinRHS, linHoriz * linVert * sizeof(double));
weightsLinRHS[0] = 0.0; weightsLinRHS[1] = 0.0; weightsLinRHS[2] = - 1.0 * simgaLin; weightsLinRHS[3] = 0.0; weightsLinRHS[4] = 0.0;
weightsLinRHS[5] = 0.0; weightsLinRHS[6] = - 2.0 * simgaLin; weightsLinRHS[7] = 8.0 * simgaLin; weightsLinRHS[8] = - 2.0 * simgaLin; weightsLinRHS[9] = 0.0;
weightsLinRHS[10] = - 1.0 * simgaLin; weightsLinRHS[11] = 8.0 * simgaLin; weightsLinRHS[12] = - 20.0 * simgaLin; weightsLinRHS[13] = 8.0 * simgaLin; weightsLinRHS[14] = - 1.0 * simgaLin;
weightsLinRHS[15] = 0.0; weightsLinRHS[16] = - 2.0 * simgaLin; weightsLinRHS[17] = 8.0 * simgaLin; weightsLinRHS[18] = - 2.0 * simgaLin; weightsLinRHS[19] = 0.0;
weightsLinRHS[20] = 0.0; weightsLinRHS[21] = 0.0; weightsLinRHS[22] = -1.0 * simgaLin; weightsLinRHS[23] = 0.0; weightsLinRHS[24] = 0.0;
// Set up the compute device structs
cuSten_t linRHS;
// Set the number of tiles
int linInitTiles = 1;
// Initialise the instance of the stencil
cuStenCreate2DXYp(&linRHS, computeDevice, linInitTiles, nx, nx, BLOCK_X, BLOCK_Y, cHalf, cBar, weightsLinRHS, linHoriz, linLeft, linRight, linVert, linTop, linBottom);
// Ensure compute type created
cudaDeviceSynchronize();
//----------------------------------------
// Set up computation of non-linear RHS
//----------------------------------------
// Set up the compute device structs
cuSten_t nonLinCompute;
// Synchronise to ensure everything initialised
cudaDeviceSynchronize();
// Copy the function to device memory
double* func;
cudaMemcpyFromSymbol(&func, devFunc, sizeof(devArg1XY));
// Set new non linear coefficient
double sigmaNonLin = (dt / 3.0) * D * (2.0 / pow(dx, 2.0));
int numStenHoriz = 3;
int numStenLeft = 1;
int numStenRight = 1;
int numStenVert = 3;
int numStenTop = 1;
int numStenBottom = 1;
int nonLinTiles = 1;
double* coe;
cudaMallocManaged(&coe, numStenHoriz * numStenVert * sizeof(double));
coe[0] = 0.0; coe[1] = 1.0 * sigmaNonLin; coe[2] = 0.0;
coe[3] = 1.0 * sigmaNonLin; coe[4] = - 4.0 * sigmaNonLin; coe[5] = 1.0 * sigmaNonLin;
coe[6] = 0.0; coe[7] = 1.0 * sigmaNonLin; coe[8] = 0.0;
// Initialise the instance of the stencil
cuStenCreate2DXYpFun(&nonLinCompute, computeDevice, nonLinTiles, nx, nx, BLOCK_X_FUN, BLOCK_Y_FUN, cNonLinRHS, cCurr, coe, numStenHoriz, numStenLeft, numStenRight, numStenVert, numStenTop, numStenBottom, func);
// Synchronise to ensure everything initialised
cudaDeviceSynchronize();
//----------------------------------------
// Begin timestepping
//----------------------------------------
double time = dt;
int timeCount = 1;
while (time < T)
{
// Set cBar
findCBar<<<gridDim, blockDim>>>(cOld, cCurr, cBar, nx);
// Ensure compute type created
cudaDeviceSynchronize();
// Compute the non-linear RHS
cuStenCompute2DXYpFun(&nonLinCompute, 0);
// Compute the linear RHS
cuStenCompute2DXYp(&linRHS, 0);
// Ensure compute type created
cudaDeviceSynchronize();
// Find the full RHS and then set cOld to cCurrent
findRHS<<<gridDim, blockDim>>>(cOld, cCurr, cHalf, cNonLinRHS, nx);
// Ensure compute type created
cudaDeviceSynchronize();
// Transpose the result
statusBLAS = cublasDgeam(handleBLAS, CUBLAS_OP_T, CUBLAS_OP_T, nx, nx, &alpha, cHalf, nx, &beta, NULL, nx, cCurr, nx);
if (statusBLAS != CUBLAS_STATUS_SUCCESS) {
printf("Unable to compute transpose \n");
return EXIT_FAILURE;
}
// Ensure transpose completed
cudaDeviceSynchronize();
// Invert the matrix
cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cCurr, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx);
// Transpose the result
statusBLAS = cublasDgeam(handleBLAS, CUBLAS_OP_T, CUBLAS_OP_T, nx, nx, &alpha, cCurr, nx, &beta, NULL, nx, cHalf, nx);
if (statusBLAS != CUBLAS_STATUS_SUCCESS) {
printf("Unable to compute transpose \n");
return EXIT_FAILURE;
}
// Ensure transpose completed
cudaDeviceSynchronize();
// Invert the matrix
cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cHalf, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx);
// Ensure computation completed
cudaDeviceSynchronize();
findNew<<<gridDim, blockDim>>>(cCurr, cBar, cHalf, nx);
// Ensure computation completed
cudaDeviceSynchronize();
// Add on the next time
time += dt;
timeCount += 1;
printf("%lf \n", time);
if (timeCount % print == 0)
{
Print_Out(cCurr, time, nx, nx);
}
}
// Ensure computation completed
cudaDeviceSynchronize();
Print_Out(cCurr, time, nx, nx);
//----------------------------------------
// Free memory at the end
//----------------------------------------
free(omega);
free(inv1Single);
free(inv2Single);
cuStenDestroy2DXYp(&linRHS);
cuStenDestroy2DXYpFun(&nonLinCompute);
cudaFree(inv1Multi);
cudaFree(inv2Multi);
cudaFree(cOld);
cudaFree(cCurr);
cudaFree(cNonLinRHS);
cudaFree(cBar);
cudaFree(cHalf);
cudaFree(ds);
cudaFree(dl);
cudaFree(diag);
cudaFree(du);
cudaFree(dw);
}
// ---------------------------------------------------------------------
// End main program
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// End of File
// ---------------------------------------------------------------------
|
7caab4fe1b450299212d5d37c752d546bd5a7dfe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "grid_mpi.h"
#include "data_util.h"
#include "macro.h"
//#include "buffer_cuda.h"
#include <string.h>
#include <iostream>
#include <stdio.h>
using namespace std;
size_t GridMPI::CalcHaloSize(int dim, unsigned width)
{
IndexArray halo_size = my_real_size_;
halo_size[dim] = width;
return halo_size.accumulate(num_dims_);
}
GridMPI::GridMPI(int unit_size, int num_dims,
const IndexArray &size,
const IndexArray &my_offset,
const IndexArray &my_size,
const Width2 &halo):
Grid(unit_size, num_dims, size),
my_offset_(my_offset),
my_size_(my_size),
halo_(halo),
halo_self_fw_(NULL),
halo_self_bw_(NULL),
halo_peer_fw_(NULL),
halo_peer_bw_(NULL)
{
my_real_size_ = my_size_;
my_real_offset_ = my_offset_;
for(int i = 0; i < num_dims; i++)
{
my_real_size_[i] += halo.fw[i] + halo.bw[i];
my_real_offset_[i] -= halo.bw[i];
}
}
GridMPI *GridMPI::Create(
int unit_size,
int num_dims, const IndexArray &size,
const IndexArray &local_offset,
const IndexArray &local_size,
const Width2 &halo)
{
GridMPI *g = new GridMPI(
unit_size,
num_dims, size,
local_offset,
local_size,
halo);
g->InitBuffer();
return g;
}
//we are using double buffering
void GridMPI::InitBuffer()
{
data_buffer_[0] = new BufferHost();
data_buffer_[0]->Allocate(num_dims_, unit_size_, my_real_size_);
data_[0] = (char*)data_buffer_[0]->Get();
data_buffer_[1] = new BufferHost();
data_buffer_[1]->Allocate(num_dims_, unit_size_, my_real_size_);
data_[1] = (char*)data_buffer_[1]->Get();
InitHaloBuffers();
}
void GridMPI::InitHaloBuffers()
{
halo_self_fw_ = new BufferHost*[num_dims_];
halo_self_bw_ = new BufferHost*[num_dims_];
halo_peer_fw_ = new BufferHost*[num_dims_];
halo_peer_bw_ = new BufferHost*[num_dims_];
for (int i = 0; i < num_dims_; ++i)
{
halo_self_fw_[i] = halo_self_bw_[i] = NULL;
halo_peer_fw_[i] = halo_peer_bw_[i] = NULL;
if (halo_.fw[i])
{
halo_self_fw_[i] = new BufferHost();
halo_peer_fw_[i] = new BufferHost();
IndexArray size = my_real_size_;
size[i] = halo_.fw[i];
halo_self_fw_[i]->Allocate(num_dims_, unit_size_, size);
halo_peer_fw_[i]->Allocate(num_dims_, unit_size_, size);
//printf("halo: #######%ld\n", halo_peer_fw_[i]);
}
if (halo_.bw[i])
{
IndexArray size = my_real_size_;
size[i] = halo_.bw[i];
halo_self_bw_[i]= new BufferHost();
halo_peer_bw_[i]= new BufferHost();
halo_self_bw_[i]->Allocate(num_dims_, unit_size_, size);
halo_peer_bw_[i]->Allocate(num_dims_, unit_size_, size);
}
}
printf("Initting.......................\n");
//IndexArray copy_size = my_real_size_;
//copy_size[num_dims_-1] = halo_.bw[num_dims_-1];
//cout<<"&&&&&&&&&&&&my real size: "<<copy_size<<endl;
//BufferCUDAHost *copy_buffer_ = new BufferCUDAHost();
//copy_buffer_ -> Allocate(num_dims_, unit_size_, copy_size);
//delete copy_buffer_;
//IndexArray copy_size = my_real_size_;
//copy_size[num_dims_-1] = halo_.bw[num_dims_-1];
//cout<<"&&&&&&&&&&&&my real size: "<<copy_size<<endl;
//BufferCUDAHost *copy_buffer_ = new BufferCUDAHost();
//size_t s = copy_size.accumulate(num_dims_);
//if (s == 0) exit(-1);
//size_t linear_size = GetLinearSize(num_dims_, unit_size_, copy_size);
////printf("&&&&&&linear size: %ld, elm_size_: %ld&&&&&&\n", linear_size, elm_size_);
////cout<<"multi dim size: "<<size<<endl;
//void *ptr = NULL;
//CUDA_SAFE_CALL(hipHostMalloc(&ptr,
//linear_size,
//hipHostMallocPortable|hipHostMallocMapped));
//printf("linear size: %ld ptr: %ld\n", linear_size, ptr);
//copy_buffer_->buf_ = ptr;
/////copy_buffer_ -> Allocate(num_dims_, unit_size_, copy_size);
//delete copy_buffer_;
//printf("^^^^^^^^^^^^^^^^^^buffer pointer: %ld\n", copy_buffer_->Get());
//printf("^^^^^^^^^^^^^^^^^^Device buffer pointer: %ld\n", copy_buffer_->DeviceBuf());
}
//copy data from Halo to grid
void GridMPI::CopyinHalo(int dim, unsigned width, bool fw)
{
//highest dim does not need copying
if(dim == num_dims_ -1)
return;
IndexArray halo_offset(0);
if(fw)
{
halo_offset[dim] = my_real_size_[dim] - halo_.fw[dim];
}
else
{
halo_offset[dim] = halo_.bw[dim] - width;
}
char *halo_buf = (char *)((fw ? halo_peer_fw_[dim] : halo_peer_bw_[dim])->Get());
IndexArray halo_size = my_real_size_;
halo_size[dim] = width;
CopyinSubgrid(unit_size_, num_dims_, data_[0], my_real_size_,
halo_buf, halo_offset, halo_size);
}
//copy halo data from the grid into the (send) buffer
//note that the memory area to be sent lies inside the privately owned part of
//the grid, not inside the halo area itself
void GridMPI::CopyoutHalo(int dim, unsigned width, bool fw)
{
IndexArray halo_offset(0);
if(fw)
{
halo_offset[dim] = halo_.bw[dim];
}
else
{
halo_offset[dim] = my_real_size_[dim] - halo_.fw[dim] - width;
}
//std::cout << "halo offset: "
// << halo_offset << "\n";
//char *tmp;
//char **halo_buf = &tmp;
//char *halo_buf = fw ? (char *)(halo_self_fw_[dim]->buf_) : (char *)(halo_self_bw_[dim]->buf_);
// The slowest changing dimension does not need actual copying
// because its halo region is physically continuous.
if (dim == (num_dims_ - 1))
{
char *p = data_[0] + GridCalcOffset3D(halo_offset, my_real_size_) * unit_size_;
//*halo_buf = p;
if(fw)
{
halo_self_fw_[dim]->buf_ = p;
}
else
{
halo_self_bw_[dim]->buf_ = p;
}
return;
}
else
{
//IndexArray halo_size = my_real_size_;
//halo_size[dim] = width;
//CopyoutSubgrid(unit_size_, num_dims_, data_[0], my_real_size_, halo_buf, halo_offset, halo_size);
return;
}
}
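// Illustrative sketch only: one way a caller might drive CopyoutHalo/CopyinHalo
// for a single dimension of a halo exchange. The peer ranks, the MPI calls, and
// direct access to the halo buffers and unit_size() are assumptions made for the
// example; the actual exchange logic lives outside this file. Disabled so it has
// no effect on compilation.
#if 0
void ExchangeHaloOnce(GridMPI *g, int dim, unsigned width,
int send_peer, int recv_peer, MPI_Comm comm)
{
size_t nbytes = g->CalcHaloSize(dim, width) * g->unit_size();
// stage (or alias) the outgoing region inside the owned part of the grid
g->CopyoutHalo(dim, width, true);
// exchange with the neighboring ranks
MPI_Sendrecv(g->halo_self_fw_[dim]->Get(), nbytes, MPI_BYTE, send_peer, 0,
g->halo_peer_bw_[dim]->Get(), nbytes, MPI_BYTE, recv_peer, 0,
comm, MPI_STATUS_IGNORE);
// unpack the received plane into this rank's backward halo region
g->CopyinHalo(dim, width, false);
}
#endif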
void GridMPI::DeleteBuffers()
{
DeleteHaloBuffers();
Grid::DeleteBuffers();
}
GridMPI::~GridMPI()
{
DeleteBuffers();
}
void GridMPI::DeleteHaloBuffers()
{
for (int i = 0; i < num_dims_ - 1; ++i)
{
if (halo_self_fw_) delete (halo_self_fw_[i]);
if (halo_self_bw_) delete (halo_self_bw_[i]);
if (halo_peer_fw_) delete (halo_peer_fw_[i]);
if (halo_peer_bw_) delete (halo_peer_bw_[i]);
}
if(halo_self_fw_)
PS_XDELETEA(halo_self_fw_);
if(halo_self_bw_)
PS_XDELETEA(halo_self_bw_);
if(halo_peer_fw_)
PS_XDELETEA(halo_peer_fw_);
if(halo_peer_bw_)
PS_XDELETEA(halo_peer_bw_);
//printf("Deleting...................................\n");
//delete copy_buffer_;
}
void GridMPI::Copyin(const void *src)
{
void *dst = buffer()->Get();
if (HasHalo())
{
CopyinSubgrid(unit_size(), num_dims_,
dst, my_real_size(),
src, halo_.bw, my_size());
}
else
{
memcpy(dst, src, GetLocalBufferSize());
}
return;
}
void GridMPI::Copyout(void *dst)
{
const void *src = buffer()->Get();
if (HasHalo())
{
IndexArray offset(halo_.bw);
CopyoutSubgrid(unit_size(), num_dims_,
src, my_real_size(),
dst, offset, my_size());
}
else
{
memcpy(dst, src, GetLocalBufferSize());
}
return;
}
|
7caab4fe1b450299212d5d37c752d546bd5a7dfe.cu
|
#include "grid_mpi.h"
#include "data_util.h"
#include "macro.h"
//#include "buffer_cuda.h"
#include <string.h>
#include <iostream>
#include <stdio.h>
using namespace std;
size_t GridMPI::CalcHaloSize(int dim, unsigned width)
{
IndexArray halo_size = my_real_size_;
halo_size[dim] = width;
return halo_size.accumulate(num_dims_);
}
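// Example: for a 3-D grid whose padded local size (my_real_size_) is
// (nx, ny, nz), a halo of width w in dimension 1 spans nx * w * nz elements;
// the value returned here is an element count, not a byte count.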
GridMPI::GridMPI(int unit_size, int num_dims,
const IndexArray &size,
const IndexArray &my_offset,
const IndexArray &my_size,
const Width2 &halo):
Grid(unit_size, num_dims, size),
my_offset_(my_offset),
my_size_(my_size),
halo_(halo),
halo_self_fw_(NULL),
halo_self_bw_(NULL),
halo_peer_fw_(NULL),
halo_peer_bw_(NULL)
{
my_real_size_ = my_size_;
my_real_offset_ = my_offset_;
for(int i = 0; i < num_dims; i++)
{
my_real_size_[i] += halo.fw[i] + halo.bw[i];
my_real_offset_[i] -= halo.bw[i];
}
}
GridMPI *GridMPI::Create(
int unit_size,
int num_dims, const IndexArray &size,
const IndexArray &local_offset,
const IndexArray &local_size,
const Width2 &halo)
{
GridMPI *g = new GridMPI(
unit_size,
num_dims, size,
local_offset,
local_size,
halo);
g->InitBuffer();
return g;
}
//we are using double buffering
void GridMPI::InitBuffer()
{
data_buffer_[0] = new BufferHost();
data_buffer_[0]->Allocate(num_dims_, unit_size_, my_real_size_);
data_[0] = (char*)data_buffer_[0]->Get();
data_buffer_[1] = new BufferHost();
data_buffer_[1]->Allocate(num_dims_, unit_size_, my_real_size_);
data_[1] = (char*)data_buffer_[1]->Get();
InitHaloBuffers();
}
void GridMPI::InitHaloBuffers()
{
halo_self_fw_ = new BufferHost*[num_dims_];
halo_self_bw_ = new BufferHost*[num_dims_];
halo_peer_fw_ = new BufferHost*[num_dims_];
halo_peer_bw_ = new BufferHost*[num_dims_];
for (int i = 0; i < num_dims_; ++i)
{
halo_self_fw_[i] = halo_self_bw_[i] = NULL;
halo_peer_fw_[i] = halo_peer_bw_[i] = NULL;
if (halo_.fw[i])
{
halo_self_fw_[i] = new BufferHost();
halo_peer_fw_[i] = new BufferHost();
IndexArray size = my_real_size_;
size[i] = halo_.fw[i];
halo_self_fw_[i]->Allocate(num_dims_, unit_size_, size);
halo_peer_fw_[i]->Allocate(num_dims_, unit_size_, size);
//printf("halo: #######%ld\n", halo_peer_fw_[i]);
}
if (halo_.bw[i])
{
IndexArray size = my_real_size_;
size[i] = halo_.bw[i];
halo_self_bw_[i]= new BufferHost();
halo_peer_bw_[i]= new BufferHost();
halo_self_bw_[i]->Allocate(num_dims_, unit_size_, size);
halo_peer_bw_[i]->Allocate(num_dims_, unit_size_, size);
}
}
printf("Initting.......................\n");
//IndexArray copy_size = my_real_size_;
//copy_size[num_dims_-1] = halo_.bw[num_dims_-1];
//cout<<"&&&&&&&&&&&&my real size: "<<copy_size<<endl;
//BufferCUDAHost *copy_buffer_ = new BufferCUDAHost();
//copy_buffer_ -> Allocate(num_dims_, unit_size_, copy_size);
//delete copy_buffer_;
//IndexArray copy_size = my_real_size_;
//copy_size[num_dims_-1] = halo_.bw[num_dims_-1];
//cout<<"&&&&&&&&&&&&my real size: "<<copy_size<<endl;
//BufferCUDAHost *copy_buffer_ = new BufferCUDAHost();
//size_t s = copy_size.accumulate(num_dims_);
//if (s == 0) exit(-1);
//size_t linear_size = GetLinearSize(num_dims_, unit_size_, copy_size);
////printf("&&&&&&linear size: %ld, elm_size_: %ld&&&&&&\n", linear_size, elm_size_);
////cout<<"multi dim size: "<<size<<endl;
//void *ptr = NULL;
//CUDA_SAFE_CALL(cudaHostAlloc(&ptr,
//linear_size,
//cudaHostAllocPortable|cudaHostAllocMapped));
//printf("linear size: %ld ptr: %ld\n", linear_size, ptr);
//copy_buffer_->buf_ = ptr;
/////copy_buffer_ -> Allocate(num_dims_, unit_size_, copy_size);
//delete copy_buffer_;
//printf("^^^^^^^^^^^^^^^^^^buffer pointer: %ld\n", copy_buffer_->Get());
//printf("^^^^^^^^^^^^^^^^^^Device buffer pointer: %ld\n", copy_buffer_->DeviceBuf());
}
//copy data from Halo to grid
void GridMPI::CopyinHalo(int dim, unsigned width, bool fw)
{
//highest dim does not need copying
if(dim == num_dims_ -1)
return;
IndexArray halo_offset(0);
if(fw)
{
halo_offset[dim] = my_real_size_[dim] - halo_.fw[dim];
}
else
{
halo_offset[dim] = halo_.bw[dim] - width;
}
char *halo_buf = (char *)((fw ? halo_peer_fw_[dim] : halo_peer_bw_[dim])->Get());
IndexArray halo_size = my_real_size_;
halo_size[dim] = width;
CopyinSubgrid(unit_size_, num_dims_, data_[0], my_real_size_,
halo_buf, halo_offset, halo_size);
}
//copy halo data from the grid into the (send) buffer
//note that the memory area to be sent lies inside the privately owned part of
//the grid, not inside the halo area itself
void GridMPI::CopyoutHalo(int dim, unsigned width, bool fw)
{
IndexArray halo_offset(0);
if(fw)
{
halo_offset[dim] = halo_.bw[dim];
}
else
{
halo_offset[dim] = my_real_size_[dim] - halo_.fw[dim] - width;
}
//std::cout << "halo offset: "
// << halo_offset << "\n";
//char *tmp;
//char **halo_buf = &tmp;
//char *halo_buf = fw ? (char *)(halo_self_fw_[dim]->buf_) : (char *)(halo_self_bw_[dim]->buf_);
// The slowest changing dimension does not need actual copying
// because its halo region is physically continuous.
if (dim == (num_dims_ - 1))
{
char *p = data_[0] + GridCalcOffset3D(halo_offset, my_real_size_) * unit_size_;
//*halo_buf = p;
if(fw)
{
halo_self_fw_[dim]->buf_ = p;
}
else
{
halo_self_bw_[dim]->buf_ = p;
}
return;
}
else
{
//IndexArray halo_size = my_real_size_;
//halo_size[dim] = width;
//CopyoutSubgrid(unit_size_, num_dims_, data_[0], my_real_size_, halo_buf, halo_offset, halo_size);
return;
}
}
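// Illustrative sketch only: one way a caller might drive CopyoutHalo/CopyinHalo
// for a single dimension of a halo exchange. The peer ranks, the MPI calls, and
// direct access to the halo buffers and unit_size() are assumptions made for the
// example; the actual exchange logic lives outside this file. Disabled so it has
// no effect on compilation.
#if 0
void ExchangeHaloOnce(GridMPI *g, int dim, unsigned width,
int send_peer, int recv_peer, MPI_Comm comm)
{
size_t nbytes = g->CalcHaloSize(dim, width) * g->unit_size();
// stage (or alias) the outgoing region inside the owned part of the grid
g->CopyoutHalo(dim, width, true);
// exchange with the neighboring ranks
MPI_Sendrecv(g->halo_self_fw_[dim]->Get(), nbytes, MPI_BYTE, send_peer, 0,
g->halo_peer_bw_[dim]->Get(), nbytes, MPI_BYTE, recv_peer, 0,
comm, MPI_STATUS_IGNORE);
// unpack the received plane into this rank's backward halo region
g->CopyinHalo(dim, width, false);
}
#endif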
void GridMPI::DeleteBuffers()
{
DeleteHaloBuffers();
Grid::DeleteBuffers();
}
GridMPI::~GridMPI()
{
DeleteBuffers();
}
void GridMPI::DeleteHaloBuffers()
{
for (int i = 0; i < num_dims_ - 1; ++i)
{
if (halo_self_fw_) delete (halo_self_fw_[i]);
if (halo_self_bw_) delete (halo_self_bw_[i]);
if (halo_peer_fw_) delete (halo_peer_fw_[i]);
if (halo_peer_bw_) delete (halo_peer_bw_[i]);
}
if(halo_self_fw_)
PS_XDELETEA(halo_self_fw_);
if(halo_self_bw_)
PS_XDELETEA(halo_self_bw_);
if(halo_peer_fw_)
PS_XDELETEA(halo_peer_fw_);
if(halo_peer_bw_)
PS_XDELETEA(halo_peer_bw_);
//printf("Deleting...................................\n");
//delete copy_buffer_;
}
void GridMPI::Copyin(const void *src)
{
void *dst = buffer()->Get();
if (HasHalo())
{
CopyinSubgrid(unit_size(), num_dims_,
dst, my_real_size(),
src, halo_.bw, my_size());
}
else
{
memcpy(dst, src, GetLocalBufferSize());
}
return;
}
void GridMPI::Copyout(void *dst)
{
const void *src = buffer()->Get();
if (HasHalo())
{
IndexArray offset(halo_.bw);
CopyoutSubgrid(unit_size(), num_dims_,
src, my_real_size(),
dst, offset, my_size());
}
else
{
memcpy(dst, src, GetLocalBufferSize());
}
return;
}
|
ea2b6db48c7ca71e34c1c4fe4fe385a50bf58cb6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_malloc.h>
#include <thrust/device_ptr.h>
#include "common.h"
#define NUM_THREADS 256
extern double size;
static texture<int2, 1, hipReadModeElementType> old_pos_tex;
static texture<int2, 1, hipReadModeElementType> old_vel_tex;
static texture<int2, 1, hipReadModeElementType> old_acc_tex;
static texture<int, 1, hipReadModeElementType> bin_index_tex;
static texture<int, 1, hipReadModeElementType> particle_index_tex;
static texture<int, 1, hipReadModeElementType> bin_start_tex;
static texture<int, 1, hipReadModeElementType> bin_end_tex;
static __inline__ __device__ double fetch_double(texture<int2, 1> t, int i)
{
int2 v = tex1Dfetch(t, i);
return __hiloint2double(v.y, v.x);
}
//
// benchmarking program
//
void init_particles_gpu(int n, double *pos, double *vel)
{
srand48( time( NULL ) );
int sx = (int)ceil(sqrt((double)n));
int sy = (n+sx-1)/sx;
int *shuffle = (int*)malloc( n * sizeof(int) );
for( int i = 0; i < n; i++ )
shuffle[i] = i;
for( int i = 0; i < n; i++ )
{
//
// make sure particles are not spatially sorted
//
int j = lrand48()%(n-i);
int k = shuffle[j];
shuffle[j] = shuffle[n-i-1];
//
// distribute particles evenly to ensure proper spacing
//
pos[2*i] = size*(1.+(k%sx))/(1+sx);
pos[2*i+1] = size*(1.+(k/sx))/(1+sy);
//
// assign random velocities within a bound
//
vel[2*i] = drand48()*2-1;
vel[2*i+1] = drand48()*2-1;
}
free( shuffle );
}
void sort_particles(int *bin_index, int *particle_index, int n)
{
thrust::sort_by_key(thrust::device_ptr<int>(bin_index),
thrust::device_ptr<int>(bin_index + n),
thrust::device_ptr<int>(particle_index));
}
// calculate particle's bin number
static __inline__ __device__ int binNum(double &d_x, double &d_y, int bpr)
{
return ( floor(d_x/cutoff) + bpr*floor(d_y/cutoff) );
}
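// A minimal host-side sketch (not used by the simulation) of the same binning
// rule, handy for sanity checks. For example, with a cutoff length of 0.01 and
// bpr = 4, a particle at (0.015, 0.025) falls in column 1, row 2, i.e. bin 9.
static inline int bin_num_host(double x, double y, int bpr, double cutoff_len)
{
return (int)floor(x / cutoff_len) + bpr * (int)floor(y / cutoff_len);
}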
__global__ void reorder_data_calc_bin(int *bin_start, int *bin_end, double *sorted_pos, double *sorted_vel, double *sorted_acc, int *bin_index, int *particle_index, double *d_pos, double *d_vel, double *d_acc, int n, int num_bins)
{
extern __shared__ int sharedHash[]; // blockSize + 1 elements
int index = threadIdx.x + blockIdx.x * blockDim.x;
int bi;
if (index < n) {
bi = bin_index[index];
sharedHash[threadIdx.x+1] = bi;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
sharedHash[0] = bin_index[index-1];
}
}
__syncthreads();
if (index < n) {
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
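// Worked example: with sorted bin_index = [0, 0, 1, 1, 1, 3] (n = 6), this
// yields bin_start = {0:0, 1:2, 3:5} and bin_end = {0:2, 1:5, 3:6}; bin 2 keeps
// the 0xffffffff sentinel written by the hipMemset before the launch.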
if (index == 0 || bi != sharedHash[threadIdx.x])
{
bin_start[bi] = index;
if (index > 0)
bin_end[sharedHash[threadIdx.x]] = index;
}
if (index == n - 1)
{
bin_end[bi] = index + 1;
}
// Now use the sorted index to reorder the pos and vel data
int sortedIndex = particle_index[index];
sorted_pos[2*index] = d_pos[2*sortedIndex];
sorted_pos[2*index+1] = d_pos[2*sortedIndex+1];
sorted_vel[2*index] = d_vel[2*sortedIndex];
sorted_vel[2*index+1] = d_vel[2*sortedIndex+1];
sorted_acc[2*index] = d_acc[2*sortedIndex];
sorted_acc[2*index+1] = d_acc[2*sortedIndex+1];
}
}
__global__ void calculate_bin_index(int *bin_index, int *particle_index, double *d_pos, int n, int bpr)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index >= n) return;
double pos_x = fetch_double(old_pos_tex, 2*index);
double pos_y = fetch_double(old_pos_tex, 2*index+1);
int cbin = binNum( pos_x,pos_y,bpr );
bin_index[index] = cbin;
particle_index[index] = index;
}
static __inline__ __device__ void apply_force_gpu(double &particle_x, double &particle_y, double &particle_ax, double &particle_ay, double &neighbor_x, double &neighbor_y)
{
double dx = neighbor_x - particle_x;
double dy = neighbor_y - particle_y;
double r2 = dx * dx + dy * dy;
if( r2 > cutoff*cutoff )
return;
//r2 = fmax( r2, min_r*min_r );
r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r;
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
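// Note on the sign: for r < cutoff the factor (1 - cutoff/r) is negative, so the
// acceleration added below points from the neighbor toward this particle, i.e.
// the interaction is repulsive; clamping r2 to min_r*min_r bounds its magnitude.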
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle_ax += coef * dx;
particle_ay += coef * dy;
}
__global__ void compute_forces_gpu(double *pos, double *acc, int n, int bpr, int *bin_start, int *bin_end)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
double pos_1x = fetch_double(old_pos_tex, 2*tid);
double pos_1y = fetch_double(old_pos_tex, 2*tid+1);
// find current particle's bin, handle boundaries
int cbin = binNum( pos_1x, pos_1y, bpr );
int lowi = -1, highi = 1, lowj = -1, highj = 1;
if (cbin < bpr)
lowj = 0;
if (cbin % bpr == 0)
lowi = 0;
if (cbin % bpr == (bpr-1))
highi = 0;
if (cbin >= bpr*(bpr-1))
highj = 0;
double acc_x;
double acc_y;
acc_x = acc_y = 0;
for (int i = lowi; i <= highi; i++)
for (int j = lowj; j <= highj; j++)
{
int nbin = cbin + i + bpr*j;
int bin_st = tex1Dfetch(bin_start_tex, nbin);
if (bin_st != 0xffffffff) {
int bin_et = tex1Dfetch(bin_end_tex, nbin);
for (int k = bin_st; k < bin_et; k++ ) {
double pos_2x = fetch_double(old_pos_tex, 2*k);
double pos_2y = fetch_double(old_pos_tex, 2*k+1);
apply_force_gpu( pos_1x, pos_1y, acc_x, acc_y, pos_2x, pos_2y );
}
}
}
acc[2*tid] = acc_x;
acc[2*tid+1] = acc_y;
}
__global__ void move_gpu (double *pos, double *vel, double *acc, int n, double size)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
double acc_x = fetch_double(old_acc_tex, 2*tid);
double acc_y = fetch_double(old_acc_tex, 2*tid+1);
double vel_x = fetch_double(old_vel_tex, 2*tid);
double vel_y = fetch_double(old_vel_tex, 2*tid+1);
double pos_x = fetch_double(old_pos_tex, 2*tid);
double pos_y = fetch_double(old_pos_tex, 2*tid+1);
vel_x += acc_x * dt;
vel_y += acc_y * dt;
pos_x += vel_x * dt;
pos_y += vel_y * dt;
//
// bounce from walls
//
while( pos_x < 0 || pos_x > size )
{
pos_x = pos_x < 0 ? -(pos_x) : 2*size-pos_x;
vel_x = -(vel_x);
}
while( pos_y < 0 || pos_y > size )
{
pos_y = pos_y < 0 ? -(pos_y) : 2*size-pos_y;
vel_y = -(vel_y);
}
vel[2*tid] = vel_x;
vel[2*tid+1] = vel_y;
pos[2*tid] = pos_x;
pos[2*tid+1] = pos_y;
}
int main( int argc, char **argv )
{
// This takes a few seconds to initialize the runtime
hipDeviceSynchronize();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the number of particles\n" );
printf( "-o <filename> to specify the output file name\n" );
printf( "-s <filename> to specify the summary output file name\n" );
return 0;
}
int n = read_int( argc, argv, "-n", 1000 );
char *savename = read_string( argc, argv, "-o", NULL );
char *sumname = read_string( argc, argv, "-s", NULL );
FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
FILE *fsum = sumname ? fopen(sumname,"a") : NULL;
double *pos = (double *) malloc( 2*n * sizeof(double) );
double *vel = (double *) malloc( 2*n * sizeof(double) );
double *acc = (double *) malloc( 2*n * sizeof(double) );
// GPU particle data structure
double *d_pos;
double *d_vel;
double *d_acc;
hipMalloc((void **) &d_pos, 2*n * sizeof(double));
hipMalloc((void **) &d_vel, 2*n * sizeof(double));
hipMalloc((void **) &d_acc, 2*n * sizeof(double));
double *sorted_pos;
double *sorted_vel;
double *sorted_acc;
hipMalloc((void **) &sorted_pos, 2*n * sizeof(double));
hipMalloc((void **) &sorted_vel, 2*n * sizeof(double));
hipMalloc((void **) &sorted_acc, 2*n * sizeof(double));
int *bin_index;
hipMalloc((void **) &bin_index, n * sizeof(int));
hipMemset(bin_index, 0x0, n * sizeof(int));
int *particle_index;
hipMalloc((void **) &particle_index, n * sizeof(int));
hipMemset(particle_index, 0x0, n * sizeof(int));
set_size( n );
init_particles_gpu(n, pos, vel);
// create spatial bins (of size cutoff by cutoff)
double size = sqrt( density*n );
int bpr = ceil(size/cutoff);
int num_bins = bpr*bpr;
int *bin_start;
int *bin_end;
hipMalloc((void **) &bin_start, num_bins * sizeof(int));
hipMalloc((void **) &bin_end, num_bins * sizeof(int));
hipMemset(bin_start, 0x0, num_bins * sizeof(int));
hipMemset(bin_end, 0x0, num_bins * sizeof(int));
hipDeviceSynchronize();
double copy_time = read_timer( );
// Copy the particles to the GPU
hipMemcpy(d_pos, pos, 2*n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_vel, vel, 2*n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_acc, acc, 2*n * sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
copy_time = read_timer( ) - copy_time;
//
// simulate a number of time steps
//
hipDeviceSynchronize();
double simulation_time = read_timer( );
for( int step = 0; step < NSTEPS; step++ )
{
int blks = (n + NUM_THREADS - 1) / NUM_THREADS;
hipBindTexture(0, old_pos_tex, d_pos, 2*n * sizeof(int2));
hipLaunchKernelGGL(( calculate_bin_index) , dim3(blks), dim3(NUM_THREADS) , 0, 0, bin_index, particle_index, d_pos, n, bpr);
hipUnbindTexture(old_pos_tex);
hipBindTexture(0, bin_index_tex, bin_index, n * sizeof(int));
hipBindTexture(0, particle_index_tex, particle_index, n * sizeof(int));
sort_particles(bin_index, particle_index, n);
hipUnbindTexture(bin_index_tex);
hipUnbindTexture(particle_index_tex);
hipMemset(bin_start, 0xffffffff, num_bins * sizeof(int));
int smemSize = sizeof(int)*(NUM_THREADS+1);
hipLaunchKernelGGL(( reorder_data_calc_bin) , dim3(blks), dim3(NUM_THREADS), smemSize , 0, bin_start, bin_end, sorted_pos, sorted_vel, sorted_acc, bin_index, particle_index, d_pos, d_vel, d_acc, n, num_bins);
hipBindTexture(0, old_pos_tex, sorted_pos, 2*n * sizeof(int2));
hipBindTexture(0, bin_start_tex, bin_start, num_bins * sizeof(int));
hipBindTexture(0, bin_end_tex, bin_end, num_bins * sizeof(int));
hipLaunchKernelGGL(( compute_forces_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, sorted_pos, sorted_acc, n, bpr, bin_start, bin_end);
hipUnbindTexture(old_pos_tex);
hipUnbindTexture(bin_start_tex);
hipUnbindTexture(bin_end_tex);
//
// move particles
//
hipBindTexture(0, old_pos_tex, sorted_pos, 2*n * sizeof(int2));
hipBindTexture(0, old_vel_tex, sorted_vel, 2*n * sizeof(int2));
hipBindTexture(0, old_acc_tex, sorted_acc, 2*n * sizeof(int2));
hipLaunchKernelGGL(( move_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, sorted_pos, sorted_vel, sorted_acc, n, size);
hipUnbindTexture(old_pos_tex);
hipUnbindTexture(old_vel_tex);
hipUnbindTexture(old_acc_tex);
//
// Swap particles between d_particles and sorted_particles
//
double *temp_pos = sorted_pos;
double *temp_vel = sorted_vel;
double *temp_acc = sorted_acc;
sorted_pos = d_pos;
sorted_vel = d_vel;
sorted_acc = d_acc;
d_pos = temp_pos;
d_vel = temp_vel;
d_acc = temp_acc;
}
hipDeviceSynchronize();
simulation_time = read_timer( ) - simulation_time;
particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
if( fsave ) {
// Copy the particles back to the CPU
hipMemcpy(pos, d_pos, 2*n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vel, d_vel, 2*n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(acc, d_acc, 2*n * sizeof(double), hipMemcpyDeviceToHost);
for(int i=0; i<n; ++i){
particles[i].x = pos[2*i];particles[i].y = pos[2*i+1];
particles[i].vx = vel[2*i];particles[i].vy = vel[2*i+1];
particles[i].ax = acc[2*i];particles[i].ay = acc[2*i+1];
}
save( fsave, n, particles);
}
printf( "CPU-GPU copy time = %g seconds\n", copy_time);
printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );
if (fsum)
fprintf(fsum,"%d %lf \n",n,simulation_time);
if (fsum)
fclose( fsum );
free( particles );
free(pos);
free(vel);
free(acc);
hipFree(d_pos);
hipFree(d_vel);
hipFree(d_acc);
hipFree(sorted_pos);
hipFree(sorted_vel);
hipFree(sorted_acc);
hipFree(bin_index);
hipFree(particle_index);
hipFree(bin_start);
hipFree(bin_end);
if( fsave )
fclose( fsave );
return 0;
}
|
ea2b6db48c7ca71e34c1c4fe4fe385a50bf58cb6.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <cuda.h>
#include <vector>
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_malloc.h>
#include <thrust/device_ptr.h>
#include "common.h"
#define NUM_THREADS 256
extern double size;
static texture<int2, 1, cudaReadModeElementType> old_pos_tex;
static texture<int2, 1, cudaReadModeElementType> old_vel_tex;
static texture<int2, 1, cudaReadModeElementType> old_acc_tex;
static texture<int, 1, cudaReadModeElementType> bin_index_tex;
static texture<int, 1, cudaReadModeElementType> particle_index_tex;
static texture<int, 1, cudaReadModeElementType> bin_start_tex;
static texture<int, 1, cudaReadModeElementType> bin_end_tex;
static __inline__ __device__ double fetch_double(texture<int2, 1> t, int i)
{
int2 v = tex1Dfetch(t, i);
return __hiloint2double(v.y, v.x);
}
//
// benchmarking program
//
void init_particles_gpu(int n, double *pos, double *vel)
{
srand48( time( NULL ) );
int sx = (int)ceil(sqrt((double)n));
int sy = (n+sx-1)/sx;
int *shuffle = (int*)malloc( n * sizeof(int) );
for( int i = 0; i < n; i++ )
shuffle[i] = i;
for( int i = 0; i < n; i++ )
{
//
// make sure particles are not spatially sorted
//
int j = lrand48()%(n-i);
int k = shuffle[j];
shuffle[j] = shuffle[n-i-1];
//
// distribute particles evenly to ensure proper spacing
//
pos[2*i] = size*(1.+(k%sx))/(1+sx);
pos[2*i+1] = size*(1.+(k/sx))/(1+sy);
//
// assign random velocities within a bound
//
vel[2*i] = drand48()*2-1;
vel[2*i+1] = drand48()*2-1;
}
free( shuffle );
}
void sort_particles(int *bin_index, int *particle_index, int n)
{
thrust::sort_by_key(thrust::device_ptr<int>(bin_index),
thrust::device_ptr<int>(bin_index + n),
thrust::device_ptr<int>(particle_index));
}
// calculate particle's bin number
static __inline__ __device__ int binNum(double &d_x, double &d_y, int bpr)
{
return ( floor(d_x/cutoff) + bpr*floor(d_y/cutoff) );
}
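// A minimal host-side sketch (not used by the simulation) of the same binning
// rule, handy for sanity checks. For example, with a cutoff length of 0.01 and
// bpr = 4, a particle at (0.015, 0.025) falls in column 1, row 2, i.e. bin 9.
static inline int bin_num_host(double x, double y, int bpr, double cutoff_len)
{
return (int)floor(x / cutoff_len) + bpr * (int)floor(y / cutoff_len);
}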
__global__ void reorder_data_calc_bin(int *bin_start, int *bin_end, double *sorted_pos, double *sorted_vel, double *sorted_acc, int *bin_index, int *particle_index, double *d_pos, double *d_vel, double *d_acc, int n, int num_bins)
{
extern __shared__ int sharedHash[]; // blockSize + 1 elements
int index = threadIdx.x + blockIdx.x * blockDim.x;
int bi;
if (index < n) {
bi = bin_index[index];
sharedHash[threadIdx.x+1] = bi;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
sharedHash[0] = bin_index[index-1];
}
}
__syncthreads();
if (index < n) {
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
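// Worked example: with sorted bin_index = [0, 0, 1, 1, 1, 3] (n = 6), this
// yields bin_start = {0:0, 1:2, 3:5} and bin_end = {0:2, 1:5, 3:6}; bin 2 keeps
// the 0xffffffff sentinel written by the cudaMemset before the launch.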
if (index == 0 || bi != sharedHash[threadIdx.x])
{
bin_start[bi] = index;
if (index > 0)
bin_end[sharedHash[threadIdx.x]] = index;
}
if (index == n - 1)
{
bin_end[bi] = index + 1;
}
// Now use the sorted index to reorder the pos and vel data
int sortedIndex = particle_index[index];
sorted_pos[2*index] = d_pos[2*sortedIndex];
sorted_pos[2*index+1] = d_pos[2*sortedIndex+1];
sorted_vel[2*index] = d_vel[2*sortedIndex];
sorted_vel[2*index+1] = d_vel[2*sortedIndex+1];
sorted_acc[2*index] = d_acc[2*sortedIndex];
sorted_acc[2*index+1] = d_acc[2*sortedIndex+1];
}
}
__global__ void calculate_bin_index(int *bin_index, int *particle_index, double *d_pos, int n, int bpr)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index >= n) return;
double pos_x = fetch_double(old_pos_tex, 2*index);
double pos_y = fetch_double(old_pos_tex, 2*index+1);
int cbin = binNum( pos_x,pos_y,bpr );
bin_index[index] = cbin;
particle_index[index] = index;
}
static __inline__ __device__ void apply_force_gpu(double &particle_x, double &particle_y, double &particle_ax, double &particle_ay, double &neighbor_x, double &neighbor_y)
{
double dx = neighbor_x - particle_x;
double dy = neighbor_y - particle_y;
double r2 = dx * dx + dy * dy;
if( r2 > cutoff*cutoff )
return;
//r2 = fmax( r2, min_r*min_r );
r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r;
double r = sqrt( r2 );
//
// very simple short-range repulsive force
//
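// Note on the sign: for r < cutoff the factor (1 - cutoff/r) is negative, so the
// acceleration added below points from the neighbor toward this particle, i.e.
// the interaction is repulsive; clamping r2 to min_r*min_r bounds its magnitude.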
double coef = ( 1 - cutoff / r ) / r2 / mass;
particle_ax += coef * dx;
particle_ay += coef * dy;
}
__global__ void compute_forces_gpu(double *pos, double *acc, int n, int bpr, int *bin_start, int *bin_end)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
double pos_1x = fetch_double(old_pos_tex, 2*tid);
double pos_1y = fetch_double(old_pos_tex, 2*tid+1);
// find current particle's bin, handle boundaries
int cbin = binNum( pos_1x, pos_1y, bpr );
int lowi = -1, highi = 1, lowj = -1, highj = 1;
if (cbin < bpr)
lowj = 0;
if (cbin % bpr == 0)
lowi = 0;
if (cbin % bpr == (bpr-1))
highi = 0;
if (cbin >= bpr*(bpr-1))
highj = 0;
double acc_x;
double acc_y;
acc_x = acc_y = 0;
for (int i = lowi; i <= highi; i++)
for (int j = lowj; j <= highj; j++)
{
int nbin = cbin + i + bpr*j;
int bin_st = tex1Dfetch(bin_start_tex, nbin);
if (bin_st != 0xffffffff) {
int bin_et = tex1Dfetch(bin_end_tex, nbin);
for (int k = bin_st; k < bin_et; k++ ) {
double pos_2x = fetch_double(old_pos_tex, 2*k);
double pos_2y = fetch_double(old_pos_tex, 2*k+1);
apply_force_gpu( pos_1x, pos_1y, acc_x, acc_y, pos_2x, pos_2y );
}
}
}
acc[2*tid] = acc_x;
acc[2*tid+1] = acc_y;
}
__global__ void move_gpu (double *pos, double *vel, double *acc, int n, double size)
{
// Get thread (particle) ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= n) return;
//
// slightly simplified Velocity Verlet integration
// conserves energy better than explicit Euler method
//
double acc_x = fetch_double(old_acc_tex, 2*tid);
double acc_y = fetch_double(old_acc_tex, 2*tid+1);
double vel_x = fetch_double(old_vel_tex, 2*tid);
double vel_y = fetch_double(old_vel_tex, 2*tid+1);
double pos_x = fetch_double(old_pos_tex, 2*tid);
double pos_y = fetch_double(old_pos_tex, 2*tid+1);
vel_x += acc_x * dt;
vel_y += acc_y * dt;
pos_x += vel_x * dt;
pos_y += vel_y * dt;
//
// bounce from walls
//
while( pos_x < 0 || pos_x > size )
{
pos_x = pos_x < 0 ? -(pos_x) : 2*size-pos_x;
vel_x = -(vel_x);
}
while( pos_y < 0 || pos_y > size )
{
pos_y = pos_y < 0 ? -(pos_y) : 2*size-pos_y;
vel_y = -(vel_y);
}
vel[2*tid] = vel_x;
vel[2*tid+1] = vel_y;
pos[2*tid] = pos_x;
pos[2*tid+1] = pos_y;
}
int main( int argc, char **argv )
{
// This takes a few seconds to initialize the runtime
cudaThreadSynchronize();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the number of particles\n" );
printf( "-o <filename> to specify the output file name\n" );
printf( "-s <filename> to specify the summary output file name\n" );
return 0;
}
int n = read_int( argc, argv, "-n", 1000 );
char *savename = read_string( argc, argv, "-o", NULL );
char *sumname = read_string( argc, argv, "-s", NULL );
FILE *fsave = savename ? fopen( savename, "w" ) : NULL;
FILE *fsum = sumname ? fopen(sumname,"a") : NULL;
double *pos = (double *) malloc( 2*n * sizeof(double) );
double *vel = (double *) malloc( 2*n * sizeof(double) );
double *acc = (double *) malloc( 2*n * sizeof(double) );
// GPU particle data structure
double *d_pos;
double *d_vel;
double *d_acc;
cudaMalloc((void **) &d_pos, 2*n * sizeof(double));
cudaMalloc((void **) &d_vel, 2*n * sizeof(double));
cudaMalloc((void **) &d_acc, 2*n * sizeof(double));
double *sorted_pos;
double *sorted_vel;
double *sorted_acc;
cudaMalloc((void **) &sorted_pos, 2*n * sizeof(double));
cudaMalloc((void **) &sorted_vel, 2*n * sizeof(double));
cudaMalloc((void **) &sorted_acc, 2*n * sizeof(double));
int *bin_index;
cudaMalloc((void **) &bin_index, n * sizeof(int));
cudaMemset(bin_index, 0x0, n * sizeof(int));
int *particle_index;
cudaMalloc((void **) &particle_index, n * sizeof(int));
cudaMemset(particle_index, 0x0, n * sizeof(int));
set_size( n );
init_particles_gpu(n, pos, vel);
// create spatial bins (of size cutoff by cutoff)
double size = sqrt( density*n );
int bpr = ceil(size/cutoff);
int num_bins = bpr*bpr;
int *bin_start;
int *bin_end;
cudaMalloc((void **) &bin_start, num_bins * sizeof(int));
cudaMalloc((void **) &bin_end, num_bins * sizeof(int));
cudaMemset(bin_start, 0x0, num_bins * sizeof(int));
cudaMemset(bin_end, 0x0, num_bins * sizeof(int));
cudaThreadSynchronize();
double copy_time = read_timer( );
// Copy the particles to the GPU
cudaMemcpy(d_pos, pos, 2*n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_vel, vel, 2*n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_acc, acc, 2*n * sizeof(double), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
copy_time = read_timer( ) - copy_time;
//
// simulate a number of time steps
//
cudaThreadSynchronize();
double simulation_time = read_timer( );
for( int step = 0; step < NSTEPS; step++ )
{
int blks = (n + NUM_THREADS - 1) / NUM_THREADS;
cudaBindTexture(0, old_pos_tex, d_pos, 2*n * sizeof(int2));
calculate_bin_index <<< blks, NUM_THREADS >>> (bin_index, particle_index, d_pos, n, bpr);
cudaUnbindTexture(old_pos_tex);
cudaBindTexture(0, bin_index_tex, bin_index, n * sizeof(int));
cudaBindTexture(0, particle_index_tex, particle_index, n * sizeof(int));
sort_particles(bin_index, particle_index, n);
cudaUnbindTexture(bin_index_tex);
cudaUnbindTexture(particle_index_tex);
cudaMemset(bin_start, 0xffffffff, num_bins * sizeof(int));
int smemSize = sizeof(int)*(NUM_THREADS+1);
reorder_data_calc_bin <<< blks, NUM_THREADS, smemSize >>> (bin_start, bin_end, sorted_pos, sorted_vel, sorted_acc, bin_index, particle_index, d_pos, d_vel, d_acc, n, num_bins);
cudaBindTexture(0, old_pos_tex, sorted_pos, 2*n * sizeof(int2));
cudaBindTexture(0, bin_start_tex, bin_start, num_bins * sizeof(int));
cudaBindTexture(0, bin_end_tex, bin_end, num_bins * sizeof(int));
compute_forces_gpu <<< blks, NUM_THREADS >>> (sorted_pos, sorted_acc, n, bpr, bin_start, bin_end);
cudaUnbindTexture(old_pos_tex);
cudaUnbindTexture(bin_start_tex);
cudaUnbindTexture(bin_end_tex);
//
// move particles
//
cudaBindTexture(0, old_pos_tex, sorted_pos, 2*n * sizeof(int2));
cudaBindTexture(0, old_vel_tex, sorted_vel, 2*n * sizeof(int2));
cudaBindTexture(0, old_acc_tex, sorted_acc, 2*n * sizeof(int2));
move_gpu <<< blks, NUM_THREADS >>> (sorted_pos, sorted_vel, sorted_acc, n, size);
cudaUnbindTexture(old_pos_tex);
cudaUnbindTexture(old_vel_tex);
cudaUnbindTexture(old_acc_tex);
//
// Swap particles between d_particles and sorted_particles
//
double *temp_pos = sorted_pos;
double *temp_vel = sorted_vel;
double *temp_acc = sorted_acc;
sorted_pos = d_pos;
sorted_vel = d_vel;
sorted_acc = d_acc;
d_pos = temp_pos;
d_vel = temp_vel;
d_acc = temp_acc;
}
cudaThreadSynchronize();
simulation_time = read_timer( ) - simulation_time;
particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) );
if( fsave ) {
// Copy the particles back to the CPU
cudaMemcpy(pos, d_pos, 2*n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vel, d_vel, 2*n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(acc, d_acc, 2*n * sizeof(double), cudaMemcpyDeviceToHost);
for(int i=0; i<n; ++i){
particles[i].x = pos[2*i];particles[i].y = pos[2*i+1];
particles[i].vx = vel[2*i];particles[i].vy = vel[2*i+1];
particles[i].ax = acc[2*i];particles[i].ay = acc[2*i+1];
}
save( fsave, n, particles);
}
printf( "CPU-GPU copy time = %g seconds\n", copy_time);
printf( "n = %d, simulation time = %g seconds\n", n, simulation_time );
if (fsum)
fprintf(fsum,"%d %lf \n",n,simulation_time);
if (fsum)
fclose( fsum );
free( particles );
free(pos);
free(vel);
free(acc);
cudaFree(d_pos);
cudaFree(d_vel);
cudaFree(d_acc);
cudaFree(sorted_pos);
cudaFree(sorted_vel);
cudaFree(sorted_acc);
cudaFree(bin_index);
cudaFree(particle_index);
cudaFree(bin_start);
cudaFree(bin_end);
if( fsave )
fclose( fsave );
return 0;
}
|
a7ff7c25f3562806b1760623774a6a9ef725013b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include "MapActor.hxx"
#define UPPER_LEFT(x) x - width - 1
#define UPPER_MID(x) x - width
#define UPPER_RIGHT(x) x - width + 1
#define LEFT(X) x - 1
#define RIGHT(X) x + 1
#define LOWER_LEFT(x) x + width - 1
#define LOWER_MID(x) x + width
#define LOWER_RIGHT(x) + width + 1
#define NUM_THREADS 256
#define NUM_THRESHOLD 2
__host__ __device__ MapActor::MapActor(unsigned int width, unsigned int height) {
m_width = width;
m_height = height;
}
__host__ __device__ MapActor::~MapActor() {
for (int i = 0; i < m_width * m_height; i++) {
}
}
__global__ void compute_free(SchellingActor* actorsGlobal, unsigned int width, unsigned int height, unsigned int* positionIndexMap) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
__shared__ Actor previousActors[width * height];
if (idy * width + idx < width * height) {
previousActors[idy * width + idx] = actorsGlobal[idy * width + idx];
if (previousActors[idy * width + idx].type() == 'f')
//Position map index needs to equal 1 if free; 0 if not free. Use this data structure as a representation of a
positionIndexMap[idy * width + idx] = 1;
}
}
__global__ void compute_shifts(SchellingActor* actorsGlobal, unsigned int width, unsigned int height, unsigned int threshold, unsigned int*
actorsThatNeedShifting){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
Actor adjacentActors[8];
__shared__ Actor previousActors[width * height];
previousActors[idy * width + idx] = actorsGlobal[idy * width + idx];
__syncthreads();
// TODO: Must implement bounds checking for this
adjacentActors[0] = actorsGlobal[UPPER_LEFT(idy * width + idx)];
adjacentActors[1] = actorsGlobal[UPPER_MID(idy * width + idx)];
adjacentActors[2] = actorsGlobal[UPPER_RIGHT(idy * width + idx)];
adjacentActors[3] = actorsGlobal[LEFT(idy * width + idx)];
adjacentActors[4] = actorsGlobal[RIGHT(idy * width + idx)];
adjacentActors[5] = actorsGlobal[LOWER_LEFT(idy * width + idx)];
adjacentActors[6] = actorsGlobal[LOWER_MID(idy * width + idx)];
adjacentActors[7] = actorsGlobal[LOWER_RIGHT(idy * width + idx)];
for(int j = 0; j < 7; j++)
adjacentActors[idy * width + idx].send(adjacentActors[j], adjacentActors[j].type());
__syncthreads();
// If you're above the threshold, you MUST move. To do this, we need to create a histogram of what we're going to move
if (actorsGlobal[idy * width + idx].numberAdjacent() > threshold)
actorsThatNeedShifting[idy * width + idx]++;
__syncthreads();
}
// TODO: Don't try it.
/*
__global__ void parallel_zip_actors(unsigned int* freeActors, unsigned int* actorsThatNeedMoving, unsigned int width, unsigned int height) {
extern __shared__ unsigned int freeActorsValues[];
extern __shared__ unsigned int actorsThatNeed[];
__shared__ int freeActorArrPos;
__shared__ int actorsThatNeedArrPos;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if (freeActors[idy * width + idx] == 1){
freeActorsValues[freeActorArrPos] = idy * width + idx;
freeActorArrPos++;
}
if (actorsThatNeed[idy * width + idx] == 1){
actorsThatNeed[actorsThatNeedArrPos] = idy * width + idx;
actorsThatNeedArrPos++;
}
__syncthreads();
min(&freeActorArrPos, actorsThatNeedArrPos);
}
*/
bool zip_actors(Actor* totalActor, unsigned int* freeActors, unsigned int* actorsThatNeedMoving, unsigned int width, unsigned int height) {
// Both vectors have been allocated with the width * height
unsigned int prev_free = width * height + 1;
unsigned int prev_actorThatNeedsMoving = width * height + 1;
for (size_t i = 0; i < width * height; i++){
// remember the most recent free slot and the most recent actor that has to move
if (freeActors[i] == 1)
prev_free = i;
if (actorsThatNeedMoving[i] == 1)
prev_actorThatNeedsMoving = i;
//Now we determine whether we can make a shift
if (prev_actorThatNeedsMoving != width * height + 1 &&
prev_free != width * height + 1){
// Shift: swap via a temporary so the free slot and the mover trade places
Actor tmp = totalActor[prev_free];
totalActor[prev_free] = totalActor[prev_actorThatNeedsMoving];
totalActor[prev_actorThatNeedsMoving] = tmp;
prev_free = width * height + 1;
prev_actorThatNeedsMoving = width * height + 1;
}
}
return true;
}
// You MUST pass in a device-vector of the
__host__ void MapActor::moveActorsAround() {
unsigned int* freePositions_h = (unsigned int*) malloc(sizeof(unsigned int) * m_width * m_height);
unsigned int* freePositions_d;
unsigned int* actorsThatNeedMoving_h = (unsigned int*) malloc(sizeof(unsigned int) * m_width * m_height);
unsigned int* actorsThatNeedMoving_d;
hipMalloc((void**)&freePositions_d, sizeof(unsigned int) * m_width * m_height);
hipMemset(freePositions_d, 0, sizeof(unsigned int) * m_width * m_height);
hipMalloc((void**)&actorsThatNeedMoving_d, sizeof(unsigned int) * m_width * m_height);
hipMemset(actorsThatNeedMoving_d, 0, sizeof(unsigned int) * m_width * m_height);
// A 2D launch needs a per-block tile whose x*y product stays within the device
// limit (typically 1024 threads), and every dim3 extent must be at least 1.
const unsigned int tile = 16;
dim3 blockDim((m_width - 1) / tile + 1, (m_height - 1) / tile + 1, 1);
dim3 threadDim(tile, tile, 1);
hipLaunchKernelGGL(( compute_free), dim3(blockDim), dim3(threadDim), 0, 0, m_map_d, m_width, m_height, freePositions_d);
hipLaunchKernelGGL(( compute_shifts), dim3(blockDim), dim3(threadDim), 0, 0, m_map_d, m_width, m_height, NUM_THRESHOLD, actorsThatNeedMoving_d);
// parallel_zip_actors is commented out above ("TODO: Don't try it."), so its
// launch is disabled here; the host-side zip_actors below performs the shift.
//hipLaunchKernelGGL(( parallel_zip_actors), dim3(blockDim), dim3(threadDim), 2 * m_width * m_height * sizeof(unsigned int), 0, freePositions_d, actorsThatNeedMoving_d, m_width, m_height);
//TODO: Compute shifts synchronously
hipMemcpy(freePositions_h, freePositions_d, sizeof(unsigned int) * m_width * m_height, hipMemcpyDeviceToHost);
hipMemcpy(actorsThatNeedMoving_h, actorsThatNeedMoving_d, sizeof(unsigned int) * m_width * m_height, hipMemcpyDeviceToHost);
// zip_actors expects the host-side actor array as its first argument. A host
// mirror of m_map_d is assumed here (hypothetical name m_map_h); copying the
// device map back before zipping is left to the surrounding class.
zip_actors(m_map_h, freePositions_h, actorsThatNeedMoving_h, m_width, m_height);
free(freePositions_h);
free(actorsThatNeedMoving_h);
hipFree(actorsThatNeedMoving_d);
hipFree(freePositions_d);
}
__device__ void MapActor::react() {
}
void MapActor::send(Actor* actor, char* message) {
}
|
a7ff7c25f3562806b1760623774a6a9ef725013b.cu
|
#include <cuda.h>
#include <stdlib.h>
#include "MapActor.hxx"
#define UPPER_LEFT(x) x - width - 1
#define UPPER_MID(x) x - width
#define UPPER_RIGHT(x) x - width + 1
#define LEFT(X) x - 1
#define RIGHT(X) x + 1
#define LOWER_LEFT(x) x + width - 1
#define LOWER_MID(x) x + width
#define LOWER_RIGHT(x) + width + 1
#define NUM_THREADS 256
#define NUM_THRESHOLD 2
__host__ __device__ MapActor::MapActor(unsigned int width, unsigned int height) {
m_width = width;
m_height = height;
}
__host__ __device__ MapActor::~MapActor() {
for (int i = 0; i < m_width * m_height; i++) {
}
}
__global__ void compute_free(SchellingActor* actorsGlobal, unsigned int width, unsigned int height, unsigned int* positionIndexMap) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
__shared__ Actor previousActors[width * height];
if (idy * width + idx < width * height) {
previousActors[idy * width + idx] = actorsGlobal[idy * width + idx];
if (previousActors[idy * width + idx].type() == 'f')
//Position map index needs to equal 1 if free; 0 if not free. Use this data structure as a representation of a
positionIndexMap[idy * width + idx] = 1;
}
}
__global__ void compute_shifts(SchellingActor* actorsGlobal, unsigned int width, unsigned int height, unsigned int threshold, unsigned int*
actorsThatNeedShifting){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
Actor adjacentActors[8];
__shared__ Actor previousActors[width * height];
previousActors[idy * width + idx] = actorsGlobal[idy * width + idx];
__syncthreads();
// TODO: Must implement bounds checking for this
adjacentActors[0] = actorsGlobal[UPPER_LEFT(idy * width + idx)];
adjacentActors[1] = actorsGlobal[UPPER_MID(idy * width + idx)];
adjacentActors[2] = actorsGlobal[UPPER_RIGHT(idy * width + idx)];
adjacentActors[3] = actorsGlobal[LEFT(idy * width + idx)];
adjacentActors[4] = actorsGlobal[RIGHT(idy * width + idx)];
adjacentActors[5] = actorsGlobal[LOWER_LEFT(idy * width + idx)];
adjacentActors[6] = actorsGlobal[LOWER_MID(idy * width + idx)];
adjacentActors[7] = actorsGlobal[LOWER_RIGHT(idy * width + idx)];
for(int j = 0; j < 7; j++)
adjacentActors[idy * width + idx].send(adjacentActors[j], adjacentActors[j].type());
__syncthreads();
// If you're above the threshold, you MUST move. To do this, we need to create a histogram of what we're going to move
if (actorsGlobal[idy * width + idx].numberAdjacent() > threshold)
actorsThatNeedShifting[idy * width + idx]++;
__syncthreads();
}
// TODO: Don't try it.
/*
__global__ void parallel_zip_actors(unsigned int* freeActors, unsigned int* actorsThatNeedMoving, unsigned int width, unsigned int height) {
extern __shared__ unsigned int freeActorsValues[];
extern __shared__ unsigned int actorsThatNeed[];
__shared__ int freeActorArrPos;
__shared__ int actorsThatNeedArrPos;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if (freeActors[idy * width + idx] == 1){
freeActorsValues[freeActorArrPos] = idy * width + idx;
freeActorArrPos++;
}
if (actorsThatNeed[idy * width + idx] == 1){
actorsThatNeed[actorsThatNeedArrPos] = idy * width + idx;
actorsThatNeedArrPos++;
}
__syncthreads();
min(&freeActorArrPos, actorsThatNeedArrPos);
}
*/
bool zip_actors(Actor* totalActor, unsigned int* freeActors, unsigned int* actorsThatNeedMoving, unsigned int width, unsigned int height) {
// Both vectors have been allocated with the width * height
unsigned int prev_free = width * height + 1;
unsigned int prev_actorThatNeedsMoving = width * height + 1;
for (size_t i = 0; i < width * height; i++){
// remember the most recent free slot and the most recent actor that has to move
if (freeActors[i] == 1)
prev_free = i;
if (actorsThatNeedMoving[i] == 1)
prev_actorThatNeedsMoving = i;
//Now we determine whether we can make a shift
if (prev_actorThatNeedsMoving != width * height + 1 &&
prev_free != width * height + 1){
// Shift: swap via a temporary so the free slot and the mover trade places
Actor tmp = totalActor[prev_free];
totalActor[prev_free] = totalActor[prev_actorThatNeedsMoving];
totalActor[prev_actorThatNeedsMoving] = tmp;
prev_free = width * height + 1;
prev_actorThatNeedsMoving = width * height + 1;
}
}
return true;
}
// You MUST pass in a device-vector of the
__host__ void MapActor::moveActorsAround() {
unsigned int* freePositions_h = (unsigned int*) malloc(sizeof(unsigned int) * m_width * m_height);
unsigned int* freePositions_d;
unsigned int* actorsThatNeedMoving_h = (unsigned int*) malloc(sizeof(unsigned int) * m_width * m_height);
unsigned int* actorsThatNeedMoving_d;
cudaMalloc((void**)&freePositions_d, sizeof(unsigned int) * m_width * m_height);
cudaMemset(freePositions_d, 0, sizeof(unsigned int) * m_width * m_height);
cudaMalloc((void**)&actorsThatNeedMoving_d, sizeof(unsigned int) * m_width * m_height);
cudaMemset(actorsThatNeedMoving_d, 0, sizeof(unsigned int) * m_width * m_height);
// A 2D launch needs a per-block tile whose x*y product stays within the device
// limit (typically 1024 threads), and every dim3 extent must be at least 1.
const unsigned int tile = 16;
dim3 blockDim((m_width - 1) / tile + 1, (m_height - 1) / tile + 1, 1);
dim3 threadDim(tile, tile, 1);
compute_free<<<blockDim, threadDim>>>(m_map_d, m_width, m_height, freePositions_d);
compute_shifts<<<blockDim, threadDim>>>(m_map_d, m_width, m_height, NUM_THRESHOLD, actorsThatNeedMoving_d);
// parallel_zip_actors is commented out above ("TODO: Don't try it."), so its
// launch is disabled here; the host-side zip_actors below performs the shift.
//parallel_zip_actors<<<blockDim, threadDim, 2 * m_width * m_height * sizeof(unsigned int)>>>(freePositions_d, actorsThatNeedMoving_d, m_width, m_height);
//TODO: Compute shifts synchronously
cudaMemcpy(freePositions_h, freePositions_d, sizeof(unsigned int) * m_width * m_height, cudaMemcpyDeviceToHost);
cudaMemcpy(actorsThatNeedMoving_h, actorsThatNeedMoving_d, sizeof(unsigned int) * m_width * m_height, cudaMemcpyDeviceToHost);
// zip_actors expects the host-side actor array as its first argument. A host
// mirror of m_map_d is assumed here (hypothetical name m_map_h); copying the
// device map back before zipping is left to the surrounding class.
zip_actors(m_map_h, freePositions_h, actorsThatNeedMoving_h, m_width, m_height);
free(freePositions_h);
free(actorsThatNeedMoving_h);
cudaFree(actorsThatNeedMoving_d);
cudaFree(freePositions_d);
}
__device__ void MapActor::react() {
}
void MapActor::send(Actor* actor, char* message) {
}
|
aeec81cba1edde32b57da65d74d77d2c0364ff14.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
{
return;
}
/*assert(filterWidth % 2 == 1);*/
float result = 0.f;
int r = thread_2D_pos.y;
int c = thread_2D_pos.x;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r, image_c;
image_r = r + filter_r;
image_c = c + filter_c;
if (image_r < 0) {
image_r = 0;
} else if (image_r > numRows - 1) {
image_r = numRows - 1;
}
if (image_c < 0) {
image_c = 0;
} else if (image_c > numCols - 1) {
image_c = numCols - 1;
}
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
{
return;
}
uchar4 rgba = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = rgba.x;
greenChannel[thread_1D_pos] = rgba.y;
blueChannel[thread_1D_pos] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(filterWidth, filterWidth, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize((numCols + filterWidth - 1) / filterWidth, (numRows + filterWidth - 1) / filterWidth, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
|
aeec81cba1edde32b57da65d74d77d2c0364ff14.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
{
return;
}
/*assert(filterWidth % 2 == 1);*/
float result = 0.f;
int r = thread_2D_pos.y;
int c = thread_2D_pos.x;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r, image_c;
image_r = r + filter_r;
image_c = c + filter_c;
if (image_r < 0) {
image_r = 0;
} else if (image_r > numRows - 1) {
image_r = numRows - 1;
}
if (image_c < 0) {
image_c = 0;
} else if (image_c > numCols - 1) {
image_c = numCols - 1;
}
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
{
return;
}
uchar4 rgba = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = rgba.x;
greenChannel[thread_1D_pos] = rgba.y;
blueChannel[thread_1D_pos] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(filterWidth, filterWidth, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize((numCols + filterWidth - 1) / filterWidth, (numRows + filterWidth - 1) / filterWidth, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
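Both copies of this homework rely on a checkCudaErrors helper from utils.h, which is not part of this listing. A minimal sketch of such a wrapper is shown below; the macro name matches the calls in the file, but the exact definition in utils.h may differ, and the abort-on-error policy is an assumption.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Illustrative error-checking macro: evaluate a CUDA runtime call and, on any
// failure, print the expression, source location, and error string, then abort.
#define checkCudaErrors(call)                                              \
  do {                                                                     \
    cudaError_t err_ = (call);                                             \
    if (err_ != cudaSuccess) {                                             \
      fprintf(stderr, "CUDA error '%s' at %s:%d: %s\n",                    \
              #call, __FILE__, __LINE__, cudaGetErrorString(err_));        \
      exit(EXIT_FAILURE);                                                  \
    }                                                                      \
  } while (0)

// Usage, as in the homework code:
//   checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));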
da92b00e16ff14eb173be5142467337b97a66ea9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "../common.h"
#include "../util/timer/timer.h"
#include "./kernel2.cu"
#include "./kernel2_wrapper.h"
void
kernel2_wrapper( knode *knodes,
long knodes_elem,
long knodes_mem,
int order,
long maxheight,
int count,
long *currKnode,
long *offset,
long *lastKnode,
long *offset_2,
int *start,
int *end,
int *recstart,
int *reclength)
{
long long offload_start = get_time();
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
int numBlocks;
numBlocks = count;
int threadsPerBlock;
threadsPerBlock = order < 256 ? order : 256;
printf("# of blocks = %d, # of threads/block = %d (ensure that device can handle)\n", numBlocks, threadsPerBlock);
//==================================================50
// knodesD
//==================================================50
knode *knodesD;
hipMalloc((void**)&knodesD, knodes_mem);
//==================================================50
// currKnodeD
//==================================================50
long *currKnodeD;
hipMalloc((void**)&currKnodeD, count*sizeof(long));
//==================================================50
// offsetD
//==================================================50
long *offsetD;
hipMalloc((void**)&offsetD, count*sizeof(long));
//==================================================50
// lastKnodeD
//==================================================50
long *lastKnodeD;
hipMalloc((void**)&lastKnodeD, count*sizeof(long));
//==================================================50
// offset_2D
//==================================================50
long *offset_2D;
hipMalloc((void**)&offset_2D, count*sizeof(long));
//==================================================50
// startD
//==================================================50
int *startD;
hipMalloc((void**)&startD, count*sizeof(int));
//==================================================50
// endD
//==================================================50
int *endD;
hipMalloc((void**)&endD, count*sizeof(int));
//==================================================50
// ansDStart
//==================================================50
int *ansDStart;
hipMalloc((void**)&ansDStart, count*sizeof(int));
//==================================================50
// ansDLength
//==================================================50
int *ansDLength;
hipMalloc((void**)&ansDLength, count*sizeof(int));
hipMemcpyAsync(knodesD, knodes, knodes_mem, hipMemcpyHostToDevice, 0);
hipMemcpyAsync(currKnodeD, currKnode, count*sizeof(long), hipMemcpyHostToDevice, 0);
hipMemcpyAsync(offsetD, offset, count*sizeof(long), hipMemcpyHostToDevice, 0);
hipMemcpyAsync(lastKnodeD, lastKnode, count*sizeof(long), hipMemcpyHostToDevice, 0);
hipMemcpyAsync(offset_2D, offset_2, count*sizeof(long), hipMemcpyHostToDevice, 0);
hipMemcpyAsync(startD, start, count*sizeof(int), hipMemcpyHostToDevice, 0);
hipMemcpyAsync(endD, end, count*sizeof(int), hipMemcpyHostToDevice, 0);
hipMemcpyAsync(ansDStart, recstart, count*sizeof(int), hipMemcpyHostToDevice, 0);
hipMemcpyAsync(ansDLength, reclength, count*sizeof(int), hipMemcpyHostToDevice, 0);
hipDeviceSynchronize();
long long kernel_start = get_time();
// [GPU] findRangeK kernel
hipLaunchKernelGGL(( findRangeK), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, maxheight,
knodesD,
knodes_elem,
currKnodeD,
offsetD,
lastKnodeD,
offset_2D,
startD,
endD,
ansDStart,
ansDLength);
hipDeviceSynchronize();
long long kernel_end = get_time();
printf("Kernel execution time: %f (us)\n", (float)(kernel_end-kernel_start));
hipMemcpyAsync(recstart, ansDStart, count*sizeof(int), hipMemcpyDeviceToHost, 0);
hipMemcpyAsync(reclength, ansDLength, count*sizeof(int), hipMemcpyDeviceToHost, 0);
hipFree(knodesD);
hipFree(currKnodeD);
hipFree(offsetD);
hipFree(lastKnodeD);
hipFree(offset_2D);
hipFree(startD);
hipFree(endD);
hipFree(ansDStart);
hipFree(ansDLength);
#ifdef DEBUG
for (int i = 0; i < count; i++)
printf("recstart[%d] = %d\n", i, recstart[i]);
for (int i = 0; i < count; i++)
printf("reclength[%d] = %d\n", i, reclength[i]);
#endif
}
|
da92b00e16ff14eb173be5142467337b97a66ea9.cu
|
#include <cuda.h>
#include <stdio.h>
#include "../common.h"
#include "../util/timer/timer.h"
#include "./kernel2.cu"
#include "./kernel2_wrapper.h"
void
kernel2_wrapper( knode *knodes,
long knodes_elem,
long knodes_mem,
int order,
long maxheight,
int count,
long *currKnode,
long *offset,
long *lastKnode,
long *offset_2,
int *start,
int *end,
int *recstart,
int *reclength)
{
long long offload_start = get_time();
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
int numBlocks;
numBlocks = count;
int threadsPerBlock;
threadsPerBlock = order < 256 ? order : 256;
printf("# of blocks = %d, # of threads/block = %d (ensure that device can handle)\n", numBlocks, threadsPerBlock);
//==================================================50
// knodesD
//==================================================50
knode *knodesD;
cudaMalloc((void**)&knodesD, knodes_mem);
//==================================================50
// currKnodeD
//==================================================50
long *currKnodeD;
cudaMalloc((void**)&currKnodeD, count*sizeof(long));
//==================================================50
// offsetD
//==================================================50
long *offsetD;
cudaMalloc((void**)&offsetD, count*sizeof(long));
//==================================================50
// lastKnodeD
//==================================================50
long *lastKnodeD;
cudaMalloc((void**)&lastKnodeD, count*sizeof(long));
//==================================================50
// offset_2D
//==================================================50
long *offset_2D;
cudaMalloc((void**)&offset_2D, count*sizeof(long));
//==================================================50
// startD
//==================================================50
int *startD;
cudaMalloc((void**)&startD, count*sizeof(int));
//==================================================50
// endD
//==================================================50
int *endD;
cudaMalloc((void**)&endD, count*sizeof(int));
//==================================================50
// ansDStart
//==================================================50
int *ansDStart;
cudaMalloc((void**)&ansDStart, count*sizeof(int));
//==================================================50
// ansDLength
//==================================================50
int *ansDLength;
cudaMalloc((void**)&ansDLength, count*sizeof(int));
cudaMemcpyAsync(knodesD, knodes, knodes_mem, cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(currKnodeD, currKnode, count*sizeof(long), cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(offsetD, offset, count*sizeof(long), cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(lastKnodeD, lastKnode, count*sizeof(long), cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(offset_2D, offset_2, count*sizeof(long), cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(startD, start, count*sizeof(int), cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(endD, end, count*sizeof(int), cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(ansDStart, recstart, count*sizeof(int), cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(ansDLength, reclength, count*sizeof(int), cudaMemcpyHostToDevice, 0);
cudaDeviceSynchronize();
long long kernel_start = get_time();
// [GPU] findRangeK kernel
findRangeK<<<numBlocks, threadsPerBlock>>>( maxheight,
knodesD,
knodes_elem,
currKnodeD,
offsetD,
lastKnodeD,
offset_2D,
startD,
endD,
ansDStart,
ansDLength);
cudaDeviceSynchronize();
long long kernel_end = get_time();
printf("Kernel execution time: %f (us)\n", (float)(kernel_end-kernel_start));
cudaMemcpyAsync(recstart, ansDStart, count*sizeof(int), cudaMemcpyDeviceToHost, 0);
cudaMemcpyAsync(reclength, ansDLength, count*sizeof(int), cudaMemcpyDeviceToHost, 0);
cudaFree(knodesD);
cudaFree(currKnodeD);
cudaFree(offsetD);
cudaFree(lastKnodeD);
cudaFree(offset_2D);
cudaFree(startD);
cudaFree(endD);
cudaFree(ansDStart);
cudaFree(ansDLength);
#ifdef DEBUG
for (int i = 0; i < count; i++)
printf("recstart[%d] = %d\n", i, recstart[i]);
for (int i = 0; i < count; i++)
printf("reclength[%d] = %d\n", i, reclength[i]);
#endif
}
|
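kernel2_wrapper times findRangeK with host-side get_time() calls bracketed by cudaDeviceSynchronize(). A sketch of the same measurement using CUDA events follows; it reuses the variables declared inside kernel2_wrapper (numBlocks, threadsPerBlock, and the device buffers) and is meant as a drop-in for the timing section, not a standalone program.

cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);

cudaEventRecord(ev_start, 0);              // record on the default stream
findRangeK<<<numBlocks, threadsPerBlock>>>(maxheight, knodesD, knodes_elem,
                                           currKnodeD, offsetD, lastKnodeD,
                                           offset_2D, startD, endD,
                                           ansDStart, ansDLength);
cudaEventRecord(ev_stop, 0);
cudaEventSynchronize(ev_stop);             // block until the kernel has finished

float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, ev_start, ev_stop);
printf("Kernel execution time: %f (ms)\n", elapsed_ms);

cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);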
86e4b2841632ea0db4c89c34adc3840e45d47c41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
date: 27-06-21
File: Activations.cu
Author : Facundo Martin Cabrera
Email: [email protected] [email protected]
GitHub: https://github.com/cabre94
GitLab: https://gitlab.com/cabre94
Description:
//TODO - See if a getter can return d_elem by reference, so the member can stay private
*/
// If you prefer to work with row and column indices,
// these macros are useful:
// C[IDX2C(i,j,M)] == value at row i (=0,...,Width-1), column j (=0,1,...,Height-1), row-major C
// #define IDX2C(i,j,ld) (((j)*(ld))+( i ))
// C[IDX2F(i,j,M)] == value at row i (=1,...,Width), column j (=1,...,Height), column-major Fortran
// #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
#include "Matrix.h"
Matrix::Matrix(int height, int width) : height(height), width(width), size(width * height){
h_elem = new float[size];
// float aux[3] = {-1, 0 , 1};
for(size_t i=0; i < size; ++i){
h_elem[i] = i;
// h_elem[i] = aux[i%3];
}
// Allocation on the device
hipMalloc(&d_elem, size * sizeof(float));
hipMemcpy( d_elem, h_elem, size * sizeof(float), hipMemcpyHostToDevice);
}
Matrix::~Matrix(){
delete [] h_elem;
hipFree(d_elem);
}
void Matrix::copyDeviceToHost(){
hipMemcpy(h_elem, d_elem, size * sizeof(float), hipMemcpyDeviceToHost);
}
void Matrix::copyHostToDevice(){
hipMemcpy(d_elem, h_elem, size * sizeof(float), hipMemcpyHostToDevice );
}
void Matrix::print(){
for(int i=0; i < height; ++i){
for(int j=0; j < width; ++j)
std::cout << h_elem[i*width + j] << "\t";
std::cout << std::endl;
}
}
/*
------------------------------
class Activation{
private:
std::string name;
public:
__host__ __device__ Activation(std::string name_); //Default constructor
__host__ __device__ virtual ~Activation();
Activation(const Activation &) = delete; //Copy constructor
Activation &operator=(const Activation &) = delete; //Copy assignment
Activation(Activation &&) = delete; //Move constructor
Activation &operator=(Activation &&) = delete; // Move assignment
__host__ __device__ std::string getName();
__host__ __device__ void call() = 0;
};
__host__ __device__ Activation::Activation(std::string name_) : name(name_) {}
__host__ __device__ Activation::~Activation(){}
__host__ __device__ std::string Activation::getName(){
return name;
}
class Sigmoid : public Activation{
public:
__host__ __device__ Sigmoid(ActivationColour C);
__host__ __device__ ~Sigmoid();
void printActivation();
__host__ __device__ std::string getName();
};
Sigmoid::Sigmoid(PieceColour C):Activation(C,PAWN,"Sigmoid") {}
*/
// __device__ __host__ float sigmoid(float x){
__device__ __host__
float sigmoid(float x){ // take a float so kernel values are not truncated to int
return 1.0f / (1 + expf(-x));
}
__global__ void sigmoidKernel(float* d_e, int size){
int i = blockIdx.x * blockDim.x + threadIdx.x;
while(i < size){
d_e[i] = sigmoid(d_e[i]);
i += blockDim.x*gridDim.x;
}
}
// int main(int argc, const char** argv) {
// Matrix A(3, 2);
// A.print();
// std::cout << std::endl;
// // dim3 nThreads(256); // CORREGIR
// // dim3 nBlocks((A.size + nThreads.x - 1) / nThreads.x); // CORREGIR
// // if(nBlocks.x>65535)
// // nBlocks.x=65535;
// // sigmoidKernel<<< nBlocks, nThreads >>>(A, A.size);
// // sigmoidKernel<<< 1, 6 >>>(&A, A.size);
// sigmoidKernel<<< 1, 6 >>>(A.d_elem, A.size);
// hipDeviceSynchronize();
// A.print();
// std::cout << std::endl;
// A.copyDeviceToHost();
// A.print();
// std::cout << std::endl;
// return 0;
// }
|
86e4b2841632ea0db4c89c34adc3840e45d47c41.cu
|
/*
date: 27-06-21
File: Activations.cu
Author : Facundo Martin Cabrera
Email: [email protected] [email protected]
GitHub: https://github.com/cabre94
GitLab: https://gitlab.com/cabre94
Description:
//TODO - See if a getter can return d_elem by reference, so the member can stay private
*/
// If you prefer to work with row and column indices,
// these macros are useful:
// C[IDX2C(i,j,M)] == value at row i (=0,...,Width-1), column j (=0,1,...,Height-1), row-major C
// #define IDX2C(i,j,ld) (((j)*(ld))+( i ))
// C[IDX2F(i,j,M)] == value at row i (=1,...,Width), column j (=1,...,Height), column-major Fortran
// #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
#include "Matrix.h"
Matrix::Matrix(int height, int width) : height(height), width(width), size(width * height){
h_elem = new float[size];
// float aux[3] = {-1, 0 , 1};
for(size_t i=0; i < size; ++i){
h_elem[i] = i;
// h_elem[i] = aux[i%3];
}
// Allocation on the device
cudaMalloc(&d_elem, size * sizeof(float));
cudaMemcpy( d_elem, h_elem, size * sizeof(float), cudaMemcpyHostToDevice);
}
Matrix::~Matrix(){
delete [] h_elem;
cudaFree(d_elem);
}
void Matrix::copyDeviceToHost(){
cudaMemcpy(h_elem, d_elem, size * sizeof(float), cudaMemcpyDeviceToHost);
}
void Matrix::copyHostToDevice(){
cudaMemcpy(d_elem, h_elem, size * sizeof(float), cudaMemcpyHostToDevice );
}
void Matrix::print(){
for(int i=0; i < height; ++i){
for(int j=0; j < width; ++j)
std::cout << h_elem[i*width + j] << "\t";
std::cout << std::endl;
}
}
/*
------------------------------
class Activation{
private:
std::string name;
public:
__host__ __device__ Activation(std::string name_); //Default constructor
__host__ __device__ virtual ~Activation();
Activation(const Activation &) = delete; //Copy constructor
Activation &operator=(const Activation &) = delete; //Copy assignment
Activation(Activation &&) = delete; //Move constructor
Activation &operator=(Activation &&) = delete; // Move assignment
__host__ __device__ std::string getName();
__host__ __device__ void call() = 0;
};
__host__ __device__ Activation::Activation(std::string name_) : name(name_) {}
__host__ __device__ Activation::~Activation(){}
__host__ __device__ std::string Activation::getName(){
return name;
}
class Sigmoid : public Activation{
public:
__host__ __device__ Sigmoid(ActivationColour C);
__host__ __device__ ~Sigmoid();
void printActivation();
__host__ __device__ std::string getName();
};
Sigmoid::Sigmoid(PieceColour C):Activation(C,PAWN,"Sigmoid") {}
*/
// __device__ __host__ float sigmoid(float x){
__device__ __host__
float sigmoid(float x){ // take a float so kernel values are not truncated to int
return 1.0f / (1 + expf(-x));
}
__global__ void sigmoidKernel(float* d_e, int size){
int i = blockIdx.x * blockDim.x + threadIdx.x;
while(i < size){
d_e[i] = sigmoid(d_e[i]);
i += blockDim.x*gridDim.x;
}
}
// int main(int argc, const char** argv) {
// Matrix A(3, 2);
// A.print();
// std::cout << std::endl;
// // dim3 nThreads(256); // CORREGIR
// // dim3 nBlocks((A.size + nThreads.x - 1) / nThreads.x); // CORREGIR
// // if(nBlocks.x>65535)
// // nBlocks.x=65535;
// // sigmoidKernel<<< nBlocks, nThreads >>>(A, A.size);
// // sigmoidKernel<<< 1, 6 >>>(&A, A.size);
// sigmoidKernel<<< 1, 6 >>>(A.d_elem, A.size);
// cudaDeviceSynchronize();
// A.print();
// std::cout << std::endl;
// A.copyDeviceToHost();
// A.print();
// std::cout << std::endl;
// return 0;
// }
|
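The commented-out main above outlines how sigmoidKernel would be driven. Below is a minimal standalone driver for an equivalent grid-stride kernel; it does not depend on Matrix.h, and the buffer sizes and helper names are illustrative only.

#include <cstdio>
#include <math.h>
#include <cuda_runtime.h>

__device__ __host__ float sigmoid_example(float x) {
    return 1.0f / (1.0f + expf(-x));
}

__global__ void sigmoidKernelExample(float* d_e, int size) {
    // Grid-stride loop: each thread handles indices i, i+stride, i+2*stride, ...
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
         i += blockDim.x * gridDim.x) {
        d_e[i] = sigmoid_example(d_e[i]);
    }
}

int main() {
    const int size = 6;
    float h[size] = {-2.f, -1.f, 0.f, 1.f, 2.f, 3.f};
    float* d = nullptr;
    cudaMalloc(&d, size * sizeof(float));
    cudaMemcpy(d, h, size * sizeof(float), cudaMemcpyHostToDevice);

    int threads = 256;
    int blocks = (size + threads - 1) / threads;   // the original sketch caps this at 65535
    sigmoidKernelExample<<<blocks, threads>>>(d, size);
    cudaDeviceSynchronize();

    cudaMemcpy(h, d, size * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; ++i) printf("%f\n", h[i]);
    cudaFree(d);
    return 0;
}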
cf821a73d7b0fea7f707bb95f76a5ebdf6d65b93.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
// Simple define to index into a 1D array from 2D space
#define I2D(num, c, r) ((r)*(num)+(c))
/*
* `step_kernel_mod` is currently a direct copy of the CPU reference solution
* `step_kernel_ref` below. Accelerate it to run as a CUDA kernel.
*/
__global__ void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i>0) && (i<ni-1) && (j>0) && (j<nj-1))
{
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// loop over all points in domain (except boundary)
for ( int j=1; j < nj-1; j++ ) {
for ( int i=1; i < ni-1; i++ ) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
int main()
{
int istep;
int nstep = 200; // number of time steps
// Specify our 2D dimensions
const int ni = 200;
const int nj = 100;
float tfac = 8.418e-5; // thermal diffusivity of silver
float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
const int size = ni * nj * sizeof(float);
temp1_ref = (float*)malloc(size);
temp2_ref = (float*)malloc(size);
hipMallocManaged (&temp1, size);
hipMallocManaged (&temp2, size);
// Initialize with random data
for( int i = 0; i < ni*nj; ++i) {
temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f);
}
// Execute the CPU-only reference version
for (istep=0; istep < nstep; istep++) {
step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
// swap the temperature pointers
temp_tmp = temp1_ref;
temp1_ref = temp2_ref;
temp2_ref= temp_tmp;
}
// Execute the modified version using same data
dim3 blocks(16, 16, 1);
dim3 threads( (ni / blocks.x) + 1, (nj / blocks.y) + 1, 1 );
hipError_t ierrSync, ierrAsync;
for (istep=0; istep < nstep; istep++) {
hipLaunchKernelGGL(( step_kernel_mod), dim3(blocks), dim3(threads), 0, 0, ni, nj, tfac, temp1, temp2);
ierrSync = hipGetLastError();
if (ierrSync != hipSuccess) {
printf("Sync error: %s\n", hipGetErrorString(ierrSync));
}
ierrAsync = hipDeviceSynchronize();
if (ierrAsync != hipSuccess) {
printf("Async error: %s\n", hipGetErrorString(ierrAsync));
}
// swap the temperature pointers
temp_tmp = temp1;
temp1 = temp2;
temp2= temp_tmp;
}
float maxError = 0;
// Output should always be stored in the temp1 and temp1_ref at this point
for( int i = 0; i < ni*nj; ++i ) {
if (fabsf(temp1[i]-temp1_ref[i]) > maxError) { maxError = fabsf(temp1[i]-temp1_ref[i]); }
}
// Check and see if our maxError is greater than an error bound
if (maxError > 0.0005f)
printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
else
printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
free( temp1_ref );
free( temp2_ref );
hipFree(temp1);
hipFree(temp2);
return 0;
}
|
cf821a73d7b0fea7f707bb95f76a5ebdf6d65b93.cu
|
#include <stdio.h>
#include <math.h>
// Simple define to index into a 1D array from 2D space
#define I2D(num, c, r) ((r)*(num)+(c))
/*
* `step_kernel_mod` is currently a direct copy of the CPU reference solution
* `step_kernel_ref` below. Accelerate it to run as a CUDA kernel.
*/
__global__ void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i>0) && (i<ni-1) && (j>0) && (j<nj-1))
{
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// loop over all points in domain (except boundary)
for ( int j=1; j < nj-1; j++ ) {
for ( int i=1; i < ni-1; i++ ) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
int main()
{
int istep;
int nstep = 200; // number of time steps
// Specify our 2D dimensions
const int ni = 200;
const int nj = 100;
float tfac = 8.418e-5; // thermal diffusivity of silver
float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
const int size = ni * nj * sizeof(float);
temp1_ref = (float*)malloc(size);
temp2_ref = (float*)malloc(size);
cudaMallocManaged (&temp1, size);
cudaMallocManaged (&temp2, size);
// Initialize with random data
for( int i = 0; i < ni*nj; ++i) {
temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f);
}
// Execute the CPU-only reference version
for (istep=0; istep < nstep; istep++) {
step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
// swap the temperature pointers
temp_tmp = temp1_ref;
temp1_ref = temp2_ref;
temp2_ref= temp_tmp;
}
// Execute the modified version using same data
dim3 blocks(16, 16, 1);
dim3 threads( (ni / blocks.x) + 1, (nj / blocks.y) + 1, 1 );
cudaError_t ierrSync, ierrAsync;
for (istep=0; istep < nstep; istep++) {
step_kernel_mod<<<blocks, threads>>>(ni, nj, tfac, temp1, temp2);
ierrSync = cudaGetLastError();
if (ierrSync != cudaSuccess) {
printf("Sync error: %s\n", cudaGetErrorString(ierrSync));
}
ierrAsync = cudaDeviceSynchronize();
if (ierrAsync != cudaSuccess) {
printf("Async error: %s\n", cudaGetErrorString(ierrAsync));
}
// swap the temperature pointers
temp_tmp = temp1;
temp1 = temp2;
temp2= temp_tmp;
}
float maxError = 0;
// Output should always be stored in the temp1 and temp1_ref at this point
for( int i = 0; i < ni*nj; ++i ) {
if (fabsf(temp1[i]-temp1_ref[i]) > maxError) { maxError = fabsf(temp1[i]-temp1_ref[i]); }
}
// Check and see if our maxError is greater than an error bound
if (maxError > 0.0005f)
printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
else
printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
free( temp1_ref );
free( temp2_ref );
cudaFree(temp1);
cudaFree(temp2);
return 0;
}
|
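In both versions of this file the launch passes the fixed 16x16 dim3 as the grid and the image-derived dim3 as the block; that happens to cover the 200x100 domain here, but it inverts the usual arrangement. A sketch of the more common sizing for step_kernel_mod follows, reusing ni, nj, tfac, temp1, and temp2 from main; it is illustrative, not the file's configuration.

// Fixed threads-per-block; grid dimensions computed to cover the ni x nj domain.
dim3 block(16, 16, 1);
dim3 grid((ni + block.x - 1) / block.x,
          (nj + block.y - 1) / block.y,
          1);
step_kernel_mod<<<grid, block>>>(ni, nj, tfac, temp1, temp2);
cudaDeviceSynchronize();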
bdf3e8aa6023f5b75c95618c42624057c0856d53.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2018 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
namespace xgboost {
namespace tree {
template <typename GradientSumT>
void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetRowBatches().begin();
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
auto is_dense = (*dmat)->Info().num_nonzero_ ==
(*dmat)->Info().num_row_ * (*dmat)->Info().num_col_;
shard->InitCompressedData(cmat, batch, is_dense);
delete dmat;
}
TEST(GpuHist, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.ellpack_matrix.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.ellpack_matrix.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * shard.ellpack_matrix.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 6;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientSumT> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
int num_symbols = shard.n_bins + 1;
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.data();
dh::safe_cuda(hipMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.size(),
hipMemcpyDeviceToHost));
auto gidx = common::CompressedIterator<uint32_t>(h_gidx_buffer.data(),
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, kNRows);
shard.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(shard.gpair, h_gpair);
thrust::sequence(
thrust::device_pointer_cast(shard.ridx.Current()),
thrust::device_pointer_cast(shard.ridx.Current() + shard.ridx.Size()));
builder.Build(&shard, 0);
DeviceHistogram<GradientSumT> d_hist = shard.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), data_size,
hipMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
GlobalMemHistBuilder<GradientPairPrecise> double_builder;
TestBuildHist(double_builder);
GlobalMemHistBuilder<GradientPair> float_builder;
TestBuildHist(float_builder);
}
TEST(GpuHist, BuildHistSharedMem) {
SharedMemHistBuilder<GradientPairPrecise> double_builder;
TestBuildHist(double_builder);
SharedMemHistBuilder<GradientPair> float_builder;
TestBuildHist(float_builder);
}
common::HistCutMatrix GetHostCutMatrix () {
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
return cmat;
}
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.colsample_bynode = 1;
param.colsample_bylevel = 1;
param.colsample_bytree = 1;
param.min_child_weight = 0.01;
// Disable all parameters.
param.reg_alpha = 0.0;
param.reg_lambda = 0;
param.max_delta_step = 0.0;
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {
new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
common::HistCutMatrix cmat = GetHostCutMatrix();
// Copy cut matrix to device.
shard->ba.Allocate(0,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
dh::CopyVectorToDeviceSpan(shard->monotone_constraints,
param.monotone_constraints);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.Data().begin());
// Initialize GPUHistMaker
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
hist_maker.param_ = param;
hist_maker.shards_.push_back(std::move(shard));
hist_maker.column_sampler_.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
hist_maker.info_ = &info;
hist_maker.node_value_constraints_.resize(1);
hist_maker.node_value_constraints_[0].lower_bound = -1.0;
hist_maker.node_value_constraints_[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
hist_maker.EvaluateSplits({ 0,0 }, &tree);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
TEST(GpuHist, ApplySplit) {
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
int constexpr kNId = 0;
int constexpr kNRows = 16;
int constexpr kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args = {};
param.InitAllowUnknown(args);
// Initialize shard
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
hist_maker.shards_.resize(1);
hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param));
auto& shard = hist_maker.shards_.at(0);
shard->ridx_segments.resize(3); // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, kNRows);
shard->ba.Allocate(0, &(shard->ridx), kNRows,
&(shard->position), kNRows);
shard->ellpack_matrix.row_stride = kNCols;
thrust::sequence(
thrust::device_pointer_cast(shard->ridx.Current()),
thrust::device_pointer_cast(shard->ridx.Current() + shard->ridx.Size()));
// Initialize GPUHistMaker
hist_maker.param_ = param;
RegTree tree;
DeviceSplitCandidate candidate;
candidate.Update(2, kLeftDir,
0.59, 4, // fvalue has to be equal to one of the cut field
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = kNId;
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
hist_maker.hmat_ = cmat;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
info.num_nonzero_ = kNRows * kNCols; // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = kNCols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * kNRows,
num_symbols);
shard->ba.Allocate(0, &(shard->gidx_buffer), compressed_size_bytes,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
shard->ellpack_matrix.is_dense = true;
common::CompressedBufferWriter wr(num_symbols);
// gidx 14 should go right, 12 goes left
std::vector<int> h_gidx (kNRows * row_stride, 14);
h_gidx[4] = 12;
h_gidx[12] = 12;
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
wr.Write(h_gidx_compressed.data(), h_gidx.begin(), h_gidx.end());
dh::CopyVectorToDeviceSpan(shard->gidx_buffer, h_gidx_compressed);
shard->ellpack_matrix.gidx_iter = common::CompressedIterator<uint32_t>(
shard->gidx_buffer.data(), num_symbols);
hist_maker.info_ = &info;
hist_maker.ApplySplit(candidate_entry, &tree);
hist_maker.UpdatePosition(candidate_entry, &tree);
ASSERT_FALSE(tree[kNId].IsLeaf());
int left_nidx = tree[kNId].LeftChild();
int right_nidx = tree[kNId].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].begin, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].end, 16);
}
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
thrust::device_vector<int64_t> d_left_count = left_count;
thrust::device_vector<int> position = position_in;
thrust::device_vector<int> position_out(position.size());
thrust::device_vector<bst_uint> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
thrust::device_vector<bst_uint> ridx_out(ridx.size());
dh::CubMemory tmp;
SortPosition(
&tmp, common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<bst_uint>(ridx.data().get(), ridx.size()),
common::Span<bst_uint>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
} // namespace tree
} // namespace xgboost
|
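TestSortPosition above pins down the contract of SortPosition: output positions are sorted, row indices remain sorted inside the left and right segments, and each (position, ridx) pairing is preserved. The host-side thrust sketch below exhibits the same segment-stable ordering via a stable partition; it illustrates the contract only and is not xgboost's SortPosition implementation.

#include <thrust/host_vector.h>
#include <thrust/partition.h>
#include <thrust/sequence.h>

// Reorder row indices so rows whose position equals left_idx come first.
// stable_partition keeps the relative order within each segment, which is the
// per-segment sortedness that the unit test asserts on ridx_out.
void sort_position_sketch(const thrust::host_vector<int>& position,
                          thrust::host_vector<unsigned int>* ridx,
                          int left_idx) {
  ridx->resize(position.size());
  thrust::sequence(ridx->begin(), ridx->end());
  thrust::stable_partition(ridx->begin(), ridx->end(),
                           [&](unsigned int r) { return position[r] == left_idx; });
}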
bdf3e8aa6023f5b75c95618c42624057c0856d53.cu
|
/*!
* Copyright 2017-2018 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
namespace xgboost {
namespace tree {
template <typename GradientSumT>
void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetRowBatches().begin();
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
auto is_dense = (*dmat)->Info().num_nonzero_ ==
(*dmat)->Info().num_row_ * (*dmat)->Info().num_col_;
shard->InitCompressedData(cmat, batch, is_dense);
delete dmat;
}
TEST(GpuHist, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.ellpack_matrix.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.ellpack_matrix.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * shard.ellpack_matrix.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 6;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientSumT> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
int num_symbols = shard.n_bins + 1;
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.data();
dh::safe_cuda(cudaMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.size(),
cudaMemcpyDeviceToHost));
auto gidx = common::CompressedIterator<uint32_t>(h_gidx_buffer.data(),
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, kNRows);
shard.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(shard.gpair, h_gpair);
thrust::sequence(
thrust::device_pointer_cast(shard.ridx.Current()),
thrust::device_pointer_cast(shard.ridx.Current() + shard.ridx.Size()));
builder.Build(&shard, 0);
DeviceHistogram<GradientSumT> d_hist = shard.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
GlobalMemHistBuilder<GradientPairPrecise> double_builder;
TestBuildHist(double_builder);
GlobalMemHistBuilder<GradientPair> float_builder;
TestBuildHist(float_builder);
}
TEST(GpuHist, BuildHistSharedMem) {
SharedMemHistBuilder<GradientPairPrecise> double_builder;
TestBuildHist(double_builder);
SharedMemHistBuilder<GradientPair> float_builder;
TestBuildHist(float_builder);
}
common::HistCutMatrix GetHostCutMatrix () {
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
return cmat;
}
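// Note on the cut matrix layout above: for feature i, row_ptr[i] .. row_ptr[i+1]
// is the [begin, end) range of its cut points inside `cut` (three per feature
// here), and min_val[i] is that feature's lower bound, used for values that fall
// below its first cut.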
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.colsample_bynode = 1;
param.colsample_bylevel = 1;
param.colsample_bytree = 1;
param.min_child_weight = 0.01;
// Disable all parameters.
param.reg_alpha = 0.0;
param.reg_lambda = 0;
param.max_delta_step = 0.0;
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {
new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
common::HistCutMatrix cmat = GetHostCutMatrix();
// Copy cut matrix to device.
shard->ba.Allocate(0,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
dh::CopyVectorToDeviceSpan(shard->monotone_constraints,
param.monotone_constraints);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.Data().begin());
// Initialize GPUHistMaker
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
hist_maker.param_ = param;
hist_maker.shards_.push_back(std::move(shard));
hist_maker.column_sampler_.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
hist_maker.info_ = &info;
hist_maker.node_value_constraints_.resize(1);
hist_maker.node_value_constraints_[0].lower_bound = -1.0;
hist_maker.node_value_constraints_[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
hist_maker.EvaluateSplits({ 0,0 }, &tree);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
TEST(GpuHist, ApplySplit) {
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
int constexpr kNId = 0;
int constexpr kNRows = 16;
int constexpr kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args = {};
param.InitAllowUnknown(args);
// Initialize shard
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
hist_maker.shards_.resize(1);
hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param));
auto& shard = hist_maker.shards_.at(0);
shard->ridx_segments.resize(3); // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, kNRows);
shard->ba.Allocate(0, &(shard->ridx), kNRows,
&(shard->position), kNRows);
shard->ellpack_matrix.row_stride = kNCols;
thrust::sequence(
thrust::device_pointer_cast(shard->ridx.Current()),
thrust::device_pointer_cast(shard->ridx.Current() + shard->ridx.Size()));
// Initialize GPUHistMaker
hist_maker.param_ = param;
RegTree tree;
DeviceSplitCandidate candidate;
candidate.Update(2, kLeftDir,
0.59, 4, // fvalue has to be equal to one of the cut fields
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = kNId;
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
hist_maker.hmat_ = cmat;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
info.num_nonzero_ = kNRows * kNCols; // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = kNCols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * kNRows,
num_symbols);
shard->ba.Allocate(0, &(shard->gidx_buffer), compressed_size_bytes,
&(shard->feature_segments), cmat.row_ptr.size(),
&(shard->min_fvalue), cmat.min_val.size(),
&(shard->gidx_fvalue_map), 24);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.row_ptr);
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.cut);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.min_val);
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
shard->ellpack_matrix.is_dense = true;
common::CompressedBufferWriter wr(num_symbols);
// gidx 14 should go right, 12 goes left
std::vector<int> h_gidx (kNRows * row_stride, 14);
h_gidx[4] = 12;
h_gidx[12] = 12;
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
wr.Write(h_gidx_compressed.data(), h_gidx.begin(), h_gidx.end());
dh::CopyVectorToDeviceSpan(shard->gidx_buffer, h_gidx_compressed);
shard->ellpack_matrix.gidx_iter = common::CompressedIterator<uint32_t>(
shard->gidx_buffer.data(), num_symbols);
hist_maker.info_ = &info;
hist_maker.ApplySplit(candidate_entry, &tree);
hist_maker.UpdatePosition(candidate_entry, &tree);
ASSERT_FALSE(tree[kNId].IsLeaf());
int left_nidx = tree[kNId].LeftChild();
int right_nidx = tree[kNId].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].begin, 2);
ASSERT_EQ(shard->ridx_segments[right_nidx].end, 16);
}
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
int right_idx) {
std::vector<int64_t> left_count = {
std::count(position_in.begin(), position_in.end(), left_idx)};
thrust::device_vector<int64_t> d_left_count = left_count;
thrust::device_vector<int> position = position_in;
thrust::device_vector<int> position_out(position.size());
thrust::device_vector<bst_uint> ridx(position.size());
thrust::sequence(ridx.begin(), ridx.end());
thrust::device_vector<bst_uint> ridx_out(ridx.size());
dh::CubMemory tmp;
SortPosition(
&tmp, common::Span<int>(position.data().get(), position.size()),
common::Span<int>(position_out.data().get(), position_out.size()),
common::Span<bst_uint>(ridx.data().get(), ridx.size()),
common::Span<bst_uint>(ridx_out.data().get(), ridx_out.size()), left_idx,
right_idx, d_left_count.data().get(), nullptr);
thrust::host_vector<int> position_result = position_out;
thrust::host_vector<int> ridx_result = ridx_out;
// Check position is sorted
EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
// Check row indices are sorted inside left and right segment
EXPECT_TRUE(
std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
EXPECT_TRUE(
std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
// Check key value pairs are the same
for (auto i = 0ull; i < ridx_result.size(); i++) {
EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
}
}
TEST(GpuHist, SortPosition) {
TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
TestSortPosition({1, 1, 1, 1}, 1, 2);
TestSortPosition({2, 2, 2, 2}, 1, 2);
TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
} // namespace tree
} // namespace xgboost
|
5f65d443bf3bfb5e99315760fad2606b935fe928.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
// parameter describing the size of matrix A
const int rows = 4096;
const int cols = 4096;
const int BLOCK_SIZE = 16;
// transpose shared kernel
// transpose kernel
__global__ void transpose_naive(float* a, float*b) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int width = gridDim.x * blockDim.x;
int height = gridDim.y * blockDim.y;
// perform transpose
if (x < height && y < width) {
b[x*height + y] = a[y*width + x];
}
}
|
5f65d443bf3bfb5e99315760fad2606b935fe928.cu
|
#include "includes.h"
using namespace std;
// parameter describing the size of matrix A
const int rows = 4096;
const int cols = 4096;
const int BLOCK_SIZE = 16;
// transpose shared kernel
// transpose kernel
__global__ void transpose_naive(float* a, float*b) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int width = gridDim.x * blockDim.x;
int height = gridDim.y * blockDim.y;
// perform transpose
if (x < height && y < width) {
b[x*height + y] = a[y*width + x];
}
}
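
// Illustrative launch sketch (a rough example, not exercised by this file): it
// assumes d_in and d_out are device buffers of rows*cols floats and that rows
// and cols are multiples of BLOCK_SIZE, as they are with the constants above.
void transpose_naive_example(float* d_in, float* d_out) {
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(cols / BLOCK_SIZE, rows / BLOCK_SIZE);
    transpose_naive<<<grid, block>>>(d_in, d_out);
    cudaDeviceSynchronize(); // wait for the transpose to finish
}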
|
71a33b9d7506831a026cf23a69e9d15c23800f9f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void divideProcessKernel(float *d_A, int wA,int k)
{
int iIdx = blockDim.x*blockIdx.x + threadIdx.x+k+1;
int kIdx = k;
if (iIdx>wA-1)
{
return;
}
d_A[iIdx*wA+kIdx] = d_A[iIdx*wA+kIdx]/ d_A[kIdx*wA+kIdx];
}
// Kernel by using global memory
__global__ void updateProcessKernel_global(float *d_A, int wA,int k)
{
int i = blockDim.x*blockIdx.x + threadIdx.x+k+1;
int j = blockDim.y*blockIdx.y + threadIdx.y+k+1;
if (i>wA-1 || j>wA-1)
{
return;
}
int idx = i*wA+j;
d_A[idx] = d_A[idx] - d_A[i*wA+k]*d_A[k*wA+j];
}
// GPU version1
void LUDecomposition_GPU_global(float *d_A,int wA)
{
dim3 ThreadDiv(512,1,1);
dim3 BlockDiv((wA+ThreadDiv.x-1)/ThreadDiv.x,1,1);
for (int k=0; k<wA; k++ )
{
hipLaunchKernelGGL(( divideProcessKernel), dim3(BlockDiv),dim3(ThreadDiv), 0, 0, d_A,wA,k);
dim3 ThreadUpdate(32,16,1);
dim3 BlockUpdate((wA+ThreadUpdate.x-k-1)/ThreadUpdate.x,(wA+ThreadUpdate.x-k-1)/ThreadUpdate.y,1);
hipLaunchKernelGGL(( updateProcessKernel_global), dim3(BlockUpdate),dim3(ThreadUpdate),(ThreadUpdate.x + ThreadUpdate.y) * sizeof(float), 0, d_A,wA,k);
}
}
//verify result
void VertifyResult(float *LURes, float *A,int wA)
{
float *MulRes = new float[wA*wA];
memset(MulRes,0,sizeof(float)*wA*wA);
float temp;
for (int i=0; i<wA; i++)//
{
for (int j=0; j<wA; j++)//
{
for (int ii=0; ii<=i; ii++)
{
if (i==ii)
{
temp = 1;
}
else
temp = LURes[i*wA+ii];
if (ii>j)
{
continue;
}
MulRes[i*wA+j] += temp*LURes[ii*wA+j];
}
}
}
float temp2;
bool bError = false;
for (int i=0; i<wA; i++)//
{
for (int j=0; j<wA; j++)//
{
temp2 = abs(MulRes[i*wA+j] - A[i*wA+j]);
if (temp2 > 1.000000E-01)
{
printf("Error:%f,%d %d,\n",temp2,i,j);
bError = true;
}
}
}
if (!bError)
{
printf("Pass!\n");
}
}
void GenSimData(int wA)
{
float *A = new float[wA*wA];
srand(time(NULL));
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
A[i*wA+j] = j;//rand()%99;
if (A[i*wA+j] ==0)
{
A[i*wA+j] ++;
}
}
}
// Save Test Data
FILE *fp;
fp = fopen("Input.txt","w");
if (fp == NULL)
{
return;
}
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
fprintf(fp,"%f ",A[i*wA+j]);
}
fprintf(fp,"\n");
}
fclose(fp);
delete[] A;
A = NULL;
}
bool ReadSimData(float *A, int wA)
{
// Read Test Data
FILE *fp;
fp = fopen("Input.txt","r");
if (fp == NULL)
{
return false;
}
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
fscanf(fp,"%f ",&A[i*wA+j]);
}
}
fclose(fp);
return true;
}
bool SaveLuResult(float *A, int wA)
{
// Save LU result
FILE *fp;
fp = fopen("LURes.txt","w");
if (fp == NULL)
{
return false;
}
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
fprintf(fp,"%f ",A[i*wA+j]);
}
fprintf(fp,"\n");
}
fclose(fp);
return true;
}
int main()
{
// GPU
int wA = 512;
float *A = new float[wA*wA];
//GenSimData(wA); // Generate simulation Data and save file "Input.txt"
//ReadSimData(A,wA);
srand(time(NULL));
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
A[i*wA+j] = j;//rand()%99;
if (A[i*wA+j] ==0)
{
A[i*wA+j] ++;
}
}
}
float *LURes = new float[wA*wA];
float *d_A;
hipMalloc((void**)&d_A,sizeof(float)*wA*wA);
hipMemcpy(d_A,A,sizeof(float)*wA*wA,hipMemcpyHostToDevice);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
LUDecomposition_GPU_global(d_A,wA);
hipEventRecord(stop,0);
hipEventSynchronize( stop );
float costtime;
hipEventElapsedTime(&costtime,start,stop);
printf("Elapsed Time:%f\n",costtime);
hipMemcpy(LURes,d_A,sizeof(float)*wA*wA,hipMemcpyDeviceToHost);
//SaveLuResult(LURes, wA);
//////vertify result
//VertifyResult(LURes,A,wA);
//system("PAUSE");
return 0;
}
|
71a33b9d7506831a026cf23a69e9d15c23800f9f.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
__global__ void divideProcessKernel(float *d_A, int wA,int k)
{
int iIdx = blockDim.x*blockIdx.x + threadIdx.x+k+1;
int kIdx = k;
if (iIdx>wA-1)
{
return;
}
d_A[iIdx*wA+kIdx] = d_A[iIdx*wA+kIdx]/ d_A[kIdx*wA+kIdx];
}
// Kernel by using global memory
__global__ void updateProcessKernel_global(float *d_A, int wA,int k)
{
int i = blockDim.x*blockIdx.x + threadIdx.x+k+1;
int j = blockDim.y*blockIdx.y + threadIdx.y+k+1;
if (i>wA-1 || j>wA-1)
{
return;
}
int idx = i*wA+j;
d_A[idx] = d_A[idx] - d_A[i*wA+k]*d_A[k*wA+j];
}
// GPU version1
void LUDecomposition_GPU_global(float *d_A,int wA)
{
dim3 ThreadDiv(512,1,1);
dim3 BlockDiv((wA+ThreadDiv.x-1)/ThreadDiv.x,1,1);
for (int k=0; k<wA; k++ )
{
divideProcessKernel<<<BlockDiv,ThreadDiv>>>(d_A,wA,k);
dim3 ThreadUpdate(32,16,1);
dim3 BlockUpdate((wA+ThreadUpdate.x-k-1)/ThreadUpdate.x,(wA+ThreadUpdate.x-k-1)/ThreadUpdate.y,1);
updateProcessKernel_global<<<BlockUpdate,ThreadUpdate,(ThreadUpdate.x + ThreadUpdate.y) * sizeof(float)>>>(d_A,wA,k);
}
}
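// Each outer iteration k above performs one step of in-place LU factorization
// without pivoting (Doolittle form, unit-diagonal L stored below the diagonal):
//   divideProcessKernel:          A[i][k] /= A[k][k]            for i > k
//   updateProcessKernel_global:   A[i][j] -= A[i][k] * A[k][j]  for i, j > k
// The dynamic shared-memory size passed to the update launch is not actually
// used by the global-memory kernel.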
//verify result
void VertifyResult(float *LURes, float *A,int wA)
{
float *MulRes = new float[wA*wA];
memset(MulRes,0,sizeof(float)*wA*wA);
float temp;
	for (int i=0; i<wA; i++)// rows
	{
		for (int j=0; j<wA; j++)// cols
{
for (int ii=0; ii<=i; ii++)
{
if (i==ii)
{
temp = 1;
}
else
temp = LURes[i*wA+ii];
if (ii>j)
{
continue;
}
MulRes[i*wA+j] += temp*LURes[ii*wA+j];
}
}
}
float temp2;
bool bError = false;
	for (int i=0; i<wA; i++)// rows
	{
		for (int j=0; j<wA; j++)// cols
{
temp2 = abs(MulRes[i*wA+j] - A[i*wA+j]);
if (temp2 > 1.000000E-01)
{
printf("Error:%f,%d %d,\n",temp2,i,j);
bError = true;
}
}
}
if (!bError)
{
printf("Pass!\n");
}
}
void GenSimData(int wA)
{
float *A = new float[wA*wA];
srand(time(NULL));
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
A[i*wA+j] = j;//rand()%99;
if (A[i*wA+j] ==0)
{
A[i*wA+j] ++;
}
}
}
// Save Test Data
FILE *fp;
fp = fopen("Input.txt","w");
if (fp == NULL)
{
return;
}
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
fprintf(fp,"%f ",A[i*wA+j]);
}
fprintf(fp,"\n");
}
fclose(fp);
delete[] A;
A = NULL;
}
bool ReadSimData(float *A, int wA)
{
// Read Test Data
FILE *fp;
fp = fopen("Input.txt","r");
if (fp == NULL)
{
return false;
}
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
fscanf(fp,"%f ",&A[i*wA+j]);
}
}
fclose(fp);
return true;
}
bool SaveLuResult(float *A, int wA)
{
// Save LU result
FILE *fp;
fp = fopen("LURes.txt","w");
if (fp == NULL)
{
return false;
}
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
fprintf(fp,"%f ",A[i*wA+j]);
}
fprintf(fp,"\n");
}
fclose(fp);
return true;
}
int main()
{
// GPU
int wA = 512;
float *A = new float[wA*wA];
//GenSimData(wA); // Generate simulation Data and save file "Input.txt"
//ReadSimData(A,wA);
srand(time(NULL));
for (int i=0; i<wA; i++)
{
for (int j=0; j<wA; j++)
{
A[i*wA+j] = j;//rand()%99;
if (A[i*wA+j] ==0)
{
A[i*wA+j] ++;
}
}
}
float *LURes = new float[wA*wA];
float *d_A;
cudaMalloc((void**)&d_A,sizeof(float)*wA*wA);
cudaMemcpy(d_A,A,sizeof(float)*wA*wA,cudaMemcpyHostToDevice);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
LUDecomposition_GPU_global(d_A,wA);
cudaEventRecord(stop,0);
cudaEventSynchronize( stop );
float costtime;
cudaEventElapsedTime(&costtime,start,stop);
printf("Elapsed Time:%f\n",costtime);
cudaMemcpy(LURes,d_A,sizeof(float)*wA*wA,cudaMemcpyDeviceToHost);
//SaveLuResult(LURes, wA);
//////vertify result
//VertifyResult(LURes,A,wA);
//system("PAUSE");
return 0;
}
|
96100d19caa56610841d6736bcec7cc1df912391.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "funcionesParalelo_double.h"
#include <math.h>
double GOL_Pdouble(int N, int iteraciones, int MAX_threads){
double *tablero, *tablero_aux;
double *d_tablero, *d_tablero_aux;
clock_t inicio, final;
size_t size = N*N*sizeof(double);
//Asignacion de memoria del lado del host
tablero = (double*)malloc(size);
tablero_aux = (double*)malloc(size);
//Asignacion de memoria del lado de device
hipMalloc(&d_tablero, size);
hipMalloc(&d_tablero_aux, size);
FILE * archivo = fopen(DIR_FILE, "r");
if (archivo==NULL) {fputs ("File error",stderr); exit (1);}
char caracterAuxiliar;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
caracterAuxiliar = fgetc(archivo);
if (caracterAuxiliar == '1'){
tablero_aux[i*N+j]=tablero[i*N+j]=1.0;
}
else {
tablero_aux[i*N+j]=tablero[i*N+j]=0.0;
}
}
}
fclose(archivo);
inicio = clock(); // start time
hipMemcpy(d_tablero,tablero,size,hipMemcpyHostToDevice);
hipMemcpy(d_tablero_aux,tablero_aux,size,hipMemcpyHostToDevice);
//VOLVER CONSTANTE
int dim_MAX = (int)sqrt(MAX_threads);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((dim_MAX + dimBlock.x-1)/dimBlock.x, (dim_MAX+dimBlock.y-1)/dimBlock.y);
//dim3 dimGrid(2,9);
//imprimeM_P_double<<<1,1>>>(d_tablero);
//hipDeviceSynchronize();
for (int i = 0; i < iteraciones; ++i)
{
//printf("Iteracion %d\n",i+1);
hipLaunchKernelGGL(( actualiza_Pdouble), dim3(dimGrid),dim3(dimBlock), 0, 0, d_tablero, d_tablero_aux, N, dim_MAX);
hipDeviceSynchronize(); // wait for all threads to finish executing
hipLaunchKernelGGL(( copiaMatriz_Pdouble), dim3(dimGrid),dim3(dimBlock), 0, 0, d_tablero_aux, d_tablero, N, dim_MAX);
hipDeviceSynchronize();
//imprimeM_Pdouble<<<1,1>>>(d_tablero,N);
//hipDeviceSynchronize();
}
final = clock();
double tiempo = ((double)final - inicio) / CLOCKS_PER_SEC;
//printf("el tiempo final es %f\n", tiempo);
// end time
free(tablero);
free(tablero_aux);
hipFree(d_tablero);
hipFree(d_tablero_aux);
return tiempo;
}
/*
 * Function that updates the board, swapping it between the auxiliary
 * matrix and the main matrix.
 */
__global__ void actualiza_Pdouble(double *malla, double *aux, int N, int dim_MAX){
int contador=0;
int celActual;
int i = blockDim.x * blockIdx.x + threadIdx.x; //fila
int j = blockDim.y * blockIdx.y + threadIdx.y; //Columna
if (i < N && j < N) {
int ii = (int)(N/dim_MAX)+1;
for(int k = 0; k < ii; k++){
celActual = i*N+j;
if(celActual < N*N){
//printf("%d ",celActual);
//Izquierda Arriba
if(i>0 && j>0 && malla[celActual-N-1]==1){
contador++;
}
//Arriba
if(i>0 && malla[celActual-N]==1){
contador++;
}
//Arriba derecha
if(i>0 && j<N-1 && malla[celActual+1-N]==1){
contador++;
}
//Izquierda
if(j>0 && malla[celActual-1]==1){
contador++;
}
//Derecha
if(j<N-1 && malla[celActual+1]==1){
contador++;
}
//Abajo izquierda
if(i<N-1 && j>0 && malla[celActual+N-1]==1){
contador++;
}
//Abajo
if(i<N-1 && malla[celActual+N]==1){
contador++;
}
//Abajo derecha
if(i<N-1 && j<N-1 && malla[celActual+1+N]==1){
contador++;
}
if(malla[celActual]==1){ //Actuamos sobre las celulas en la copia de la matriz
if(contador==2 || contador==3){//La celulas vivas con 2 o 3 celulas vivas pegadas, se mantiene vivas.
aux[celActual]=1;
}
else{ //Si no se cumple la condicion, mueren.
aux[celActual]=0;
}
}
else{
if(contador==3){ //Las celulas muertas con 3 celulas vivas pegadas, resucitan.
aux[celActual]=1;
}
}
contador=0;
}
celActual = celActual + dim_MAX;
}
}
}
__global__ void copiaMatriz_Pdouble(double *malla, double *aux, int N, int dim_MAX){
int celActual;
int i = blockDim.x * blockIdx.x + threadIdx.x; //fila
int j = blockDim.y * blockIdx.y + threadIdx.y; //Columna
if (i < N && j < N) {
int ii = (int)(N/dim_MAX)+1;
for(int k = 0; k < ii; k++){
celActual = i*N+j;
if(celActual < N*N){
aux[celActual] = malla[celActual];
celActual = celActual + dim_MAX;
}
}
}
}
/*
 * Function that prints the matrix.
 * Its inputs are the grid and its size.
 */
__global__ void imprimeM_Pdouble(double *m, int N){
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
{
if (m[i*N+j]==1)
{
printf("* ");
}
else{
printf("- ");
}
}
printf("\n");
}
}
|
96100d19caa56610841d6736bcec7cc1df912391.cu
|
#include "funcionesParalelo_double.h"
#include <math.h>
double GOL_Pdouble(int N, int iteraciones, int MAX_threads){
double *tablero, *tablero_aux;
double *d_tablero, *d_tablero_aux;
clock_t inicio, final;
size_t size = N*N*sizeof(double);
//Asignacion de memoria del lado del host
tablero = (double*)malloc(size);
tablero_aux = (double*)malloc(size);
//Asignacion de memoria del lado de device
cudaMalloc(&d_tablero, size);
cudaMalloc(&d_tablero_aux, size);
FILE * archivo = fopen(DIR_FILE, "r");
if (archivo==NULL) {fputs ("File error",stderr); exit (1);}
char caracterAuxiliar;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
caracterAuxiliar = fgetc(archivo);
if (caracterAuxiliar == '1'){
tablero_aux[i*N+j]=tablero[i*N+j]=1.0;
}
else {
tablero_aux[i*N+j]=tablero[i*N+j]=0.0;
}
}
}
fclose(archivo);
inicio = clock(); // start time
cudaMemcpy(d_tablero,tablero,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_tablero_aux,tablero_aux,size,cudaMemcpyHostToDevice);
//VOLVER CONSTANTE
int dim_MAX = (int)sqrt(MAX_threads);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((dim_MAX + dimBlock.x-1)/dimBlock.x, (dim_MAX+dimBlock.y-1)/dimBlock.y);
//dim3 dimGrid(2,9);
//imprimeM_P_double<<<1,1>>>(d_tablero);
//cudaThreadSynchronize();
for (int i = 0; i < iteraciones; ++i)
{
//printf("Iteracion %d\n",i+1);
actualiza_Pdouble<<<dimGrid,dimBlock>>>(d_tablero, d_tablero_aux, N, dim_MAX);
cudaThreadSynchronize(); // wait for all threads to finish executing
copiaMatriz_Pdouble<<<dimGrid,dimBlock>>>(d_tablero_aux, d_tablero, N, dim_MAX);
cudaThreadSynchronize();
//imprimeM_Pdouble<<<1,1>>>(d_tablero,N);
//cudaThreadSynchronize();
}
final = clock();
double tiempo = ((double)final - inicio) / CLOCKS_PER_SEC;
//printf("el tiempo final es %f\n", tiempo);
// end time
free(tablero);
free(tablero_aux);
cudaFree(d_tablero);
cudaFree(d_tablero_aux);
return tiempo;
}
/*
 * Function that updates the board, swapping it between the auxiliary
 * matrix and the main matrix.
 */
__global__ void actualiza_Pdouble(double *malla, double *aux, int N, int dim_MAX){
int contador=0;
int celActual;
int i = blockDim.x * blockIdx.x + threadIdx.x; //fila
int j = blockDim.y * blockIdx.y + threadIdx.y; //Columna
if (i < N && j < N) {
int ii = (int)(N/dim_MAX)+1;
for(int k = 0; k < ii; k++){
celActual = i*N+j;
if(celActual < N*N){
//printf("%d ",celActual);
//Izquierda Arriba
if(i>0 && j>0 && malla[celActual-N-1]==1){
contador++;
}
//Arriba
if(i>0 && malla[celActual-N]==1){
contador++;
}
//Arriba derecha
if(i>0 && j<N-1 && malla[celActual+1-N]==1){
contador++;
}
//Izquierda
if(j>0 && malla[celActual-1]==1){
contador++;
}
//Derecha
if(j<N-1 && malla[celActual+1]==1){
contador++;
}
//Abajo izquierda
if(i<N-1 && j>0 && malla[celActual+N-1]==1){
contador++;
}
//Abajo
if(i<N-1 && malla[celActual+N]==1){
contador++;
}
//Abajo derecha
if(i<N-1 && j<N-1 && malla[celActual+1+N]==1){
contador++;
}
if(malla[celActual]==1){ //Actuamos sobre las celulas en la copia de la matriz
if(contador==2 || contador==3){//La celulas vivas con 2 o 3 celulas vivas pegadas, se mantiene vivas.
aux[celActual]=1;
}
else{ //Si no se cumple la condicion, mueren.
aux[celActual]=0;
}
}
else{
if(contador==3){ //Las celulas muertas con 3 celulas vivas pegadas, resucitan.
aux[celActual]=1;
}
}
contador=0;
}
celActual = celActual + dim_MAX;
}
}
}
__global__ void copiaMatriz_Pdouble(double *malla, double *aux, int N, int dim_MAX){
int celActual;
int i = blockDim.x * blockIdx.x + threadIdx.x; //fila
int j = blockDim.y * blockIdx.y + threadIdx.y; //Columna
if (i < N && j < N) {
int ii = (int)(N/dim_MAX)+1;
for(int k = 0; k < ii; k++){
celActual = i*N+j;
if(celActual < N*N){
aux[celActual] = malla[celActual];
celActual = celActual + dim_MAX;
}
}
}
}
/*
 * Function that prints the matrix.
 * Its inputs are the grid and its size.
 */
__global__ void imprimeM_Pdouble(double *m, int N){
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
{
if (m[i*N+j]==1)
{
printf("* ");
}
else{
printf("- ");
}
}
printf("\n");
}
}
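
// Rough usage sketch (illustrative only): it assumes a board file at DIR_FILE
// and that stdio is available through funcionesParalelo_double.h, as the print
// kernel above already requires. Board size, iteration count and thread budget
// are example values.
void gol_pdouble_example() {
    double seconds = GOL_Pdouble(/*N=*/512, /*iteraciones=*/100, /*MAX_threads=*/1024);
    printf("GOL_Pdouble, 512x512 board, 100 iterations: %f s\n", seconds);
}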
|
893532462034ca6895706f54388024d7445b94b2.hip
|
// !!! This is a file automatically generated by hipify!!!
// $ nvcc -std=c++11 --expt-extended-lambda -I../.. kernel_executor_daxpy.cu -o kernel_executor_daxpy
#include <cassert>
#include <iostream>
#include <chrono>
#include <thrust/device_vector.h>
#include <kernel_executor.hpp>
struct empty {};
void daxpy(const kernel_executor& ex, int n, double a, const double* x, double* y)
{
int block_size = 256;
int num_blocks = (n + block_size - 1) / block_size;
grid_index shape(num_blocks, block_size);
ex.bulk_execute(
[=] __device__ (grid_index idx, empty&, empty&)
{
int block_idx = idx[0].x;
int thread_idx = idx[1].x;
int i = block_idx * block_size + thread_idx;
if(i < n)
{
y[i] = a * x[i] + y[i];
}
},
shape,
[] __host__ __device__ { return empty(); },
[] __host__ __device__ { return empty(); }
);
}
void test(size_t n)
{
// create resources
cuda_context ctx;
hipStream_t stream;
if(hipError_t error = hipStreamCreate(&stream))
{
throw std::runtime_error("test: CUDA error after hipStreamCreate: " + std::string(hipGetErrorString(error)));
}
thrust::device_vector<double> x(n, 1);
thrust::device_vector<double> y(n, 2);
double a = 2;
kernel_executor ex(ctx, stream);
daxpy(ex, n, a, x.data().get(), y.data().get());
ex.wait();
if(hipError_t error = hipStreamDestroy(stream))
{
throw std::runtime_error("test: CUDA error after hipStreamDestroy: " + std::string(hipGetErrorString(error)));
}
thrust::device_vector<double> reference(n, 4);
assert(reference == y);
}
double measure_bandwidth(size_t n, size_t num_trials = 100)
{
thrust::device_vector<double> x(n, 1);
thrust::device_vector<double> y(n, 2);
double a = 2;
// create resources
cuda_context ctx;
hipStream_t stream;
if(hipError_t error = hipStreamCreate(&stream))
{
throw std::runtime_error("measure_bandwidth: CUDA error after hipStreamCreate: " + std::string(hipGetErrorString(error)));
}
kernel_executor ex(ctx, stream);
// time trials
auto start = std::chrono::high_resolution_clock().now();
{
for(size_t i = 0; i < num_trials; ++i)
{
daxpy(ex, n, a, x.data().get(), y.data().get());
}
ex.wait();
}
auto end = std::chrono::high_resolution_clock().now();
// compute mean GB/s
size_t mean_nanoseconds = (std::chrono::duration_cast<std::chrono::nanoseconds>(end - start) / num_trials).count();
double mean_seconds = double(mean_nanoseconds) / 1000000000;
size_t num_bytes = 2 * n * sizeof(double);
double mean_bytes_per_second = double(num_bytes) / mean_seconds;
double mean_gigabytes_per_second = mean_bytes_per_second / 1000000000;
if(hipError_t error = hipStreamDestroy(stream))
{
throw std::runtime_error("measure_bandwidth: CUDA error after hipStreamDestroy: " + std::string(hipGetErrorString(error)));
}
return mean_gigabytes_per_second;
}
int main(int argc, char** argv)
{
size_t n = 1 << 25;
if(argc > 1)
{
n = std::atoi(argv[1]);
}
// first test for correctness
test(n);
double bandwidth = measure_bandwidth(n);
std::clog << n << ", " << bandwidth << std::endl;
std::cout << "Kernel Executor DAXPY bandwidth: " << bandwidth << " GB/s" << std::endl;
std::cout << "OK" << std::endl;
return 0;
}
|
893532462034ca6895706f54388024d7445b94b2.cu
|
// $ nvcc -std=c++11 --expt-extended-lambda -I../.. kernel_executor_daxpy.cu -o kernel_executor_daxpy
#include <cassert>
#include <iostream>
#include <chrono>
#include <thrust/device_vector.h>
#include <kernel_executor.hpp>
struct empty {};
void daxpy(const kernel_executor& ex, int n, double a, const double* x, double* y)
{
int block_size = 256;
int num_blocks = (n + block_size - 1) / block_size;
grid_index shape(num_blocks, block_size);
ex.bulk_execute(
[=] __device__ (grid_index idx, empty&, empty&)
{
int block_idx = idx[0].x;
int thread_idx = idx[1].x;
int i = block_idx * block_size + thread_idx;
if(i < n)
{
y[i] = a * x[i] + y[i];
}
},
shape,
[] __host__ __device__ { return empty(); },
[] __host__ __device__ { return empty(); }
);
}
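// The shape passed to bulk_execute is a two-level grid_index of
// (num_blocks, block_size), so in the lambda above idx[0].x plays the role of
// blockIdx.x and idx[1].x of threadIdx.x. The two trailing factories supply
// per-launch shared state (presumably outer/block and inner/thread scoped),
// which this kernel does not need, hence the empty struct.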
void test(size_t n)
{
// create resources
cuda_context ctx;
cudaStream_t stream;
if(cudaError_t error = cudaStreamCreate(&stream))
{
throw std::runtime_error("test: CUDA error after cudaStreamCreate: " + std::string(cudaGetErrorString(error)));
}
thrust::device_vector<double> x(n, 1);
thrust::device_vector<double> y(n, 2);
double a = 2;
kernel_executor ex(ctx, stream);
daxpy(ex, n, a, x.data().get(), y.data().get());
ex.wait();
if(cudaError_t error = cudaStreamDestroy(stream))
{
throw std::runtime_error("test: CUDA error after cudaStreamDestroy: " + std::string(cudaGetErrorString(error)));
}
thrust::device_vector<double> reference(n, 4);
assert(reference == y);
}
double measure_bandwidth(size_t n, size_t num_trials = 100)
{
thrust::device_vector<double> x(n, 1);
thrust::device_vector<double> y(n, 2);
double a = 2;
// create resources
cuda_context ctx;
cudaStream_t stream;
if(cudaError_t error = cudaStreamCreate(&stream))
{
throw std::runtime_error("measure_bandwidth: CUDA error after cudaStreamCreate: " + std::string(cudaGetErrorString(error)));
}
kernel_executor ex(ctx, stream);
// time trials
auto start = std::chrono::high_resolution_clock().now();
{
for(size_t i = 0; i < num_trials; ++i)
{
daxpy(ex, n, a, x.data().get(), y.data().get());
}
ex.wait();
}
auto end = std::chrono::high_resolution_clock().now();
// compute mean GB/s
size_t mean_nanoseconds = (std::chrono::duration_cast<std::chrono::nanoseconds>(end - start) / num_trials).count();
double mean_seconds = double(mean_nanoseconds) / 1000000000;
size_t num_bytes = 2 * n * sizeof(double);
double mean_bytes_per_second = double(num_bytes) / mean_seconds;
double mean_gigabytes_per_second = mean_bytes_per_second / 1000000000;
if(cudaError_t error = cudaStreamDestroy(stream))
{
throw std::runtime_error("measure_bandwidth: CUDA error after cudaStreamDestroy: " + std::string(cudaGetErrorString(error)));
}
return mean_gigabytes_per_second;
}
int main(int argc, char** argv)
{
size_t n = 1 << 25;
if(argc > 1)
{
n = std::atoi(argv[1]);
}
// first test for correctness
test(n);
double bandwidth = measure_bandwidth(n);
std::clog << n << ", " << bandwidth << std::endl;
std::cout << "Kernel Executor DAXPY bandwidth: " << bandwidth << " GB/s" << std::endl;
std::cout << "OK" << std::endl;
return 0;
}
|
b424ebd5738c01cc693b10a68df4ad3151a81733.hip
|
// !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
extern "C"
{
__global__ void FullyConnectedCurvatureKernel(
float *weightsGradPtr,
float *biasGradPtr,
float *shiftedWeightsPtr,
float *shiftedBiasPtr,
float *avgWeightGradPtr,
float *avgBiasGradPtr,
float *weightGradCurvePtr,
float *biasGradCurvePtr,
float *dropoutMaskPtr,
int prevLayerSize,
int thisLayerSize
)
{
// i: prev. layer neuron id
// j: current layer neuron id
float avgGrad;
int i;
int j = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
+ blockDim.x * blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
if (!dropoutMaskPtr[j])
{
int index = j;
for (i = 0; i < prevLayerSize; i++)
{
// weight finite difference curvature
avgGrad = avgWeightGradPtr[index];
if (avgGrad == 0)
avgGrad = 0.000001; // don't divide by 0!
weightGradCurvePtr[index] = abs(weightsGradPtr[index] - shiftedWeightsPtr[index]) / avgGrad;
index += thisLayerSize;
}
// bias finite difference curvature
avgGrad = avgBiasGradPtr[j];
if (avgGrad == 0)
avgGrad = 0.000001; // don't divide by 0!
biasGradCurvePtr[j] = abs(biasGradPtr[j] - shiftedBiasPtr[j]) / avgGrad;
}
}
}
}
|
b424ebd5738c01cc693b10a68df4ad3151a81733.cu
|
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
extern "C"
{
__global__ void FullyConnectedCurvatureKernel(
float *weightsGradPtr,
float *biasGradPtr,
float *shiftedWeightsPtr,
float *shiftedBiasPtr,
float *avgWeightGradPtr,
float *avgBiasGradPtr,
float *weightGradCurvePtr,
float *biasGradCurvePtr,
float *dropoutMaskPtr,
int prevLayerSize,
int thisLayerSize
)
{
// i: prev. layer neuron id
// j: current layer neuron id
float avgGrad;
int i;
int j = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
+ blockDim.x * blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
if (!dropoutMaskPtr[j])
{
int index = j;
for (i = 0; i < prevLayerSize; i++)
{
// weight finite difference curvature
avgGrad = avgWeightGradPtr[index];
if (avgGrad == 0)
avgGrad = 0.000001; // don't divide by 0!
weightGradCurvePtr[index] = abs(weightsGradPtr[index] - shiftedWeightsPtr[index]) / avgGrad;
index += thisLayerSize;
}
// bias finite difference curvature
avgGrad = avgBiasGradPtr[j];
if (avgGrad == 0)
avgGrad = 0.000001; // don't divide by 0!
biasGradCurvePtr[j] = abs(biasGradPtr[j] - shiftedBiasPtr[j]) / avgGrad;
}
}
}
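	// For every weight (and bias) whose dropout mask entry is zero, the kernel
	// forms a finite-difference curvature estimate,
	//     curvature = |grad - shifted| / avgGrad,
	// i.e. |weightsGradPtr - shiftedWeightsPtr| / avgWeightGradPtr per weight and
	// the analogous bias expression, written into the *GradCurvePtr buffers.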
}
|
108f6448ac54480bd7c419f0680e17f71ea82027.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl.cu, normal z -> s, Mon Jun 25 18:24:12 2018
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl_full(
int m, int n, float mul,
float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_lower(
int m, int n, float mul,
float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_upper(
int m, int n, float mul,
float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
SLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
kl INTEGER
Unused, for LAPACK compatability.
@param[in]
ku KU is INTEGER
Unused, for LAPACK compatability.
@param[in]
cfrom REAL
@param[in]
cto REAL
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl
*******************************************************************************/
extern "C" void
magmablas_slascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
float cfrom, float cto,
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
float smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK slascl
// Get machine parameters
smlnum = lapackf77_slamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
hipLaunchKernelGGL(( slascl_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( slascl_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( slascl_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, mul, dA, ldda);
}
cnt += 1;
}
}
|
108f6448ac54480bd7c419f0680e17f71ea82027.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl.cu, normal z -> s, Mon Jun 25 18:24:12 2018
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl_full(
int m, int n, float mul,
float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_lower(
int m, int n, float mul,
float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_upper(
int m, int n, float mul,
float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
SLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
kl INTEGER
Unused, for LAPACK compatability.
@param[in]
ku KU is INTEGER
Unused, for LAPACK compatability.
@param[in]
cfrom REAL
@param[in]
cto REAL
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl
*******************************************************************************/
extern "C" void
magmablas_slascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
float cfrom, float cto,
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
float smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK slascl
// Get machine parameters
smlnum = lapackf77_slamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
slascl_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
slascl_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
slascl_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, mul, dA, ldda);
}
cnt += 1;
}
}
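// Rough usage sketch (illustrative, not part of this file): rescale a full
// m-by-n device matrix dA by cto/cfrom, e.g. dividing it by a previously
// computed norm `anrm`; the queue and dA are assumed to be set up in the
// usual MAGMA way.
//
//     magma_int_t info = 0;
//     magmablas_slascl( MagmaFull, 0, 0, anrm, 1.0f, m, n, dA, ldda, queue, &info );
//     // on return (info == 0), each dA(i,j) has been multiplied by 1.0f/anrm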
|
b3775ea86132900563b80133d06ae18972317953.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <iomanip>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
#include <stdio.h>
#include <time.h>
#include <cmath>
#include <stdio.h>
#include <math.h>
using namespace std;
using namespace cv;
using namespace cv::gpu;
#define msg(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); } while (0)
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
#define malloc2D(name, xDim, yDim, type) do { \
name = (type **)malloc(xDim * sizeof(type *)); \
assert(name != NULL); \
name[0] = (type *)malloc(xDim * yDim * sizeof(type)); \
assert(name[0] != NULL); \
for (size_t i = 1; i < xDim; i++) \
name[i] = name[i-1] + yDim; \
} while (0)
#ifdef __HIPCC__
inline void checkCuda(hipError_t e) {
if (e != hipSuccess) {
// hipGetErrorString() isn't always very helpful. Look up the error
// number in the hipError_t enum in hip/driver_types.h in the CUDA includes
// directory for a better explanation.
err("CUDA Error %d: %s\n", e, hipGetErrorString(e));
}
}
inline void checkLastCudaError() {
checkCuda(hipGetLastError());
}
#endif
const float pii = asin(1.0) * 2;
int minn(int a, int b, int c) {
return min(a, min(b, c));
}
int Rgb2Hsi(const IplImage* img, CvMat* dataH, CvMat* dataS, CvMat* dataI) {
uchar* data;
// rgb
int img_r, img_g, img_b;
int min_rgb; // rgb
// HSI
float fHue, fSaturation, fIntensity;
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
//data = cvPtr2D(img, i, j, 0);
data = (uchar *) img->imageData + i * img->widthStep + j * 3;
img_b = (uchar) *data;
data++;
img_g = (uchar) *data;
data++;
img_r = (uchar) *data;
// Intensity[0, 1]
fIntensity = (float) ((img_b + img_g + img_r) / 3) / 255;
// RGB
float fTemp = img_r < img_g ? img_r : img_g;
min_rgb = fTemp < img_b ? fTemp : img_b;
// Saturation[0, 1]
fSaturation = 1.0f
- (float) (3 * min_rgb) / (img_r + img_g + img_b);
// theta
float numerator = (img_r - img_g + img_r - img_b) / 2;
float denominator = sqrt(
pow((float) (img_r - img_g), 2)
+ (img_r - img_b) * (img_g - img_b));
// Hue
if (denominator != 0) {
float theta = acos(numerator / denominator) * 180 / 3.14;
if (img_b <= img_g) {
fHue = theta;
} else {
fHue = 360 - theta;
}
} else {
fHue = 0;
}
//
//printf("%f %f %f\n",fHue,fSaturation, fIntensity);
cvmSet(dataH, i, j, fHue);
cvmSet(dataS, i, j, fSaturation);
cvmSet(dataI, i, j, fIntensity);
}
}
return 1;
}
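// The per-pixel conversion above follows the usual RGB -> HSI definitions:
//   I = (R + G + B) / (3 * 255)
//   S = 1 - 3 * min(R, G, B) / (R + G + B)
//   theta = acos(((R - G) + (R - B)) / (2 * sqrt((R - G)^2 + (R - B) * (G - B))))
//   H = theta if B <= G, otherwise 360 - theta   (degrees)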
int Hsi2Rgb(IplImage* src, CvMat* dataH, CvMat* dataS, CvMat* dataI) {
uchar iB, iG, iR;
for (int i = 0; i < src->height; i++) {
for (int j = 0; j < src->width; j++) {
// H
double dH = cvmGet(dataH, i, j);
// S
double dS = cvmGet(dataS, i, j);
//
double dI = cvmGet(dataI, i, j);
double dTempB, dTempG, dTempR;
// RG
if (dH < 120 && dH >= 0) {
// H
dH = dH * pii / 180;
dTempB = dI * (1 - dS);
dTempR = dI * (1 + (dS * cos(dH)) / cos(pii / 3 - dH));
dTempG = (3 * dI - (dTempR + dTempB));
}
// GB
else if (dH < 240 && dH >= 120) {
dH -= 120;
// H
dH = dH * pii / 180;
dTempR = dI * (1 - dS);
dTempG = dI * (1 + dS * cos(dH) / cos(pii / 3 - dH));
dTempB = (3 * dI - (dTempR + dTempG));
}
// BR
else {
dH -= 240;
// H
dH = dH * pii / 180;
dTempG = dI * (1 - dS);
dTempB = dI * (1 + (dS * cos(dH)) / cos(pii / 3 - dH));
dTempR = (3 * dI - (dTempG + dTempB));
}
//printf("%f %f %f\n", dTempB, dTempG, dTempR);
if (dTempR > 1.0)
dTempR = 1.0;
if (dTempR < 0)
dTempR = 0.0;
if (dTempG > 1.0)
dTempG = 1.0;
if (dTempG < 0)
dTempG = 0.0;
if (dTempB > 1.0)
dTempB = 1.0;
if (dTempB < 0)
dTempB = 0.0;
iB = (uchar) (dTempB * 255);
iG = (uchar) (dTempG * 255);
iR = (uchar) (dTempR * 255);
cvSet2D(src, i, j, cvScalar(iB, iG, iR));
//offset = src->widthStep * i + j * src->nChannels;
//src->imageData[offset] = iB;
//src->imageData[offset+1] = iG;
//src->imageData[offset+2] = iR;
}
}
return 1;
}
int EqualizeHist(CvMat *pImg) {
int histogram[256];
memset(histogram, 0, sizeof(histogram));
for (int y = 0; y < pImg->rows; y++) {
for (int x = 0; x < pImg->cols; x++) {
int t = 255.0 * cvmGet(pImg, y, x) + 0.5;
if (t > 255)
t = 255;
if (t < 0)
t = 0;
histogram[(int) t]++;
}
}
int Min = 1000000000;
for (int i = 1; i < 255; i++) {
histogram[i] += histogram[i - 1];
if (histogram[i] < Min)
Min = histogram[i];
}
int num = pImg->height * pImg->width - Min;
for (int y = 0; y < pImg->rows; y++) {
for (int x = 0; x < pImg->cols; x++) {
int t = 255.0 * (cvmGet(pImg, y, x)) + 0.5;
if (t > 255)
t = 255;
if (t < 0)
t = 0;
t = histogram[t];
//printf("%f ",(float)(t - 1) / num);
cvmSet(pImg, y, x, (float) (1.0 * t - Min) / num);
} //printf("\n");
}
return true;
}
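// The remapping above is essentially the standard histogram-equalization
// transfer function on the intensity channel:
//   I_out = (cdf(round(255 * I_in)) - cdf_min) / (width * height - cdf_min)
// where cdf is the cumulative 256-bin histogram and cdf_min its smallest
// accumulated value.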
__global__ void g_rgb2hsi(int *g_rgb, float *g_h, float *g_s, float *g_i,
int len) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) {
return;
}
int img_r, img_g, img_b;
int min_rgb;
float fHue, fSaturation, fIntensity;
int data = g_rgb[tid];
int t = (1 << 8) - 1;
img_r = data & t;
data = data >> 8;
img_g = data & t;
data = data >> 8;
img_b = data & t;
fIntensity = (float) ((img_b + img_g + img_r) / 3) / 255;
float fTemp = img_r < img_g ? img_r : img_g;
min_rgb = fTemp < img_b ? fTemp : img_b;
fSaturation = 1.0f - (float) (3 * min_rgb) / (img_r + img_g + img_b);
float numerator = (img_r - img_g + img_r - img_b) / 2;
float denominator = sqrt(
pow((float) (img_r - img_g), 2)
+ (img_r - img_b) * (img_g - img_b));
if (denominator != 0) {
float theta = acos(numerator / denominator) * 180 / 3.14;
if (img_b <= img_g) {
fHue = theta;
} else {
fHue = 360 - theta;
}
} else {
fHue = 0;
}
g_h[tid] = fHue;
g_s[tid] = fSaturation;
g_i[tid] = fIntensity;
}
__global__ void g_hsi2rgb(int *g_rgb, float *g_h, float *g_s, float *g_i,
int len, double pi) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) {
return;
}
uchar iB, iG, iR;
float dH = (float) g_h[tid];
float dS = (float) g_s[tid];
float dI = (float) g_i[tid];
float dTempB, dTempG, dTempR;
if (dH < 120 && dH >= 0) {
// H
dH = dH * pi / 180;
dTempB = dI * (1 - dS);
dTempR = dI * (1 + (dS * cos(dH)) / cos(pi / 3 - dH));
dTempG = (3 * dI - (dTempR + dTempB));
}
// GB
else if (dH < 240 && dH >= 120) {
dH -= 120;
// H
dH = dH * pi / 180;
dTempR = dI * (1 - dS);
dTempG = dI * (1 + dS * cos(dH) / cos(pi / 3 - dH));
dTempB = (3 * dI - (dTempR + dTempG));
}
// BR
else {
dH -= 240;
// H
dH = dH * pi / 180;
dTempG = dI * (1 - dS);
dTempB = dI * (1 + (dS * cos(dH)) / cos(pi / 3 - dH));
dTempR = (3 * dI - (dTempG + dTempB));
}
//printf("%f %f %f\n", dTempB, dTempG, dTempR);
if (dTempR > 1.0)
dTempR = 1.0;
if (dTempR < 0)
dTempR = 0.0;
if (dTempG > 1.0)
dTempG = 1.0;
if (dTempG < 0)
dTempG = 0.0;
if (dTempB > 1.0)
dTempB = 1.0;
if (dTempB < 0)
dTempB = 0.0;
iB = (uchar) (dTempB * 255);
iG = (uchar) (dTempG * 255);
iR = (uchar) (dTempR * 255);
int t = 0;
t |= (iR);
t |= (iG << 8);
t |= (iB << 16);
g_rgb[tid] = t;
}
bool GPU_Rgb2Hsi(const IplImage* img, CvMat* dataH, CvMat* dataS,
CvMat* dataI) {
int * d_rgb;
float *d_h;
float *d_s;
float *d_i;
int * h_rgb;
float *h_h;
float *h_s;
float *h_i;
int len = img->width * img->height;
hipMalloc((void **) &d_rgb, len * sizeof(int));
hipMalloc((void **) &d_h, len * sizeof(float));
hipMalloc((void **) &d_s, len * sizeof(float));
hipMalloc((void **) &d_i, len * sizeof(float));
h_rgb = (int *) malloc(len * sizeof(int));
h_h = (float *) malloc(len * sizeof(float));
h_s = (float *) malloc(len * sizeof(float));
h_i = (float *) malloc(len * sizeof(float));
hipDeviceProp_t deviceProp;
int deviceNum;
hipGetDevice(&deviceNum);
hipGetDeviceProperties(&deviceProp, deviceNum);
int maxThread = deviceProp.maxThreadsPerBlock;
dim3 thread = dim3(maxThread <= len ? maxThread : len);
dim3 block = dim3((len + maxThread - 1) / maxThread);
// pack the rgb data: r (byte), g (byte), b (byte) stored in one 32-bit word per pixel
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
uchar *data = (uchar *) img->imageData + i * img->widthStep + j * 3;
uchar img_b = (uchar) *data;
data++;
uchar img_g = (uchar) *data;
data++;
uchar img_r = (uchar) *data;
int t = 0;
t |= (img_r);
t |= (img_g << 8);
t |= (img_b << 16);
h_rgb[i * img->width + j] = t;
}
}
hipMemcpy(d_rgb, h_rgb, sizeof(int) * len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( g_rgb2hsi), dim3(block), dim3(thread), 0, 0, d_rgb, d_h, d_s, d_i, len);
hipDeviceSynchronize();
checkLastCudaError();
hipMemcpy(h_h, d_h, sizeof(float) * len, hipMemcpyDeviceToHost);
hipMemcpy(h_s, d_s, sizeof(float) * len, hipMemcpyDeviceToHost);
hipMemcpy(h_i, d_i, sizeof(float) * len, hipMemcpyDeviceToHost);
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
int t = i * img->width + j;
cvmSet(dataH, i, j, h_h[t]);
cvmSet(dataS, i, j, h_s[t]);
cvmSet(dataI, i, j, h_i[t]);
}
}
free(h_rgb);
free(h_h);
free(h_s);
free(h_i);
hipFree(d_rgb);
hipFree(d_h);
hipFree(d_s);
hipFree(d_i);
return true;
}
bool GPU_Hsi2Rgb(IplImage* img, CvMat* dataH, CvMat* dataS, CvMat* dataI) {
int * d_rgb;
float *d_h;
float *d_s;
float *d_i;
int * h_rgb;
float *h_h;
float *h_s;
float *h_i;
int len = img->width * img->height;
hipMalloc((void **) &d_rgb, len * sizeof(int));
hipMalloc((void **) &d_h, len * sizeof(float));
hipMalloc((void **) &d_s, len * sizeof(float));
hipMalloc((void **) &d_i, len * sizeof(float));
h_rgb = (int *) malloc(len * sizeof(int));
h_h = (float *) malloc(len * sizeof(float));
h_s = (float *) malloc(len * sizeof(float));
h_i = (float *) malloc(len * sizeof(float));
hipDeviceProp_t deviceProp;
int deviceNum;
hipGetDevice(&deviceNum);
hipGetDeviceProperties(&deviceProp, deviceNum);
int maxThread = deviceProp.maxThreadsPerBlock;
dim3 thread = dim3(maxThread <= len ? maxThread : len);
dim3 block = dim3((len + maxThread - 1) / maxThread);
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
float dH = cvmGet(dataH, i, j);
float dS = cvmGet(dataS, i, j);
float dI = cvmGet(dataI, i, j);
int t = i * img->width + j;
h_h[t] = dH;
h_s[t] = dS;
h_i[t] = dI;
}
}
hipMemcpy(d_h, h_h, sizeof(float) * len, hipMemcpyHostToDevice);
hipMemcpy(d_s, h_s, sizeof(float) * len, hipMemcpyHostToDevice);
hipMemcpy(d_i, h_i, sizeof(float) * len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( g_hsi2rgb), dim3(block), dim3(thread), 0, 0, d_rgb, d_h, d_s, d_i, len, pii);
hipDeviceSynchronize();
hipMemcpy(h_rgb, d_rgb, sizeof(int) * len, hipMemcpyDeviceToHost);
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
int id = i * img->width + j;
int data = h_rgb[id];
int t = (1 << 8) - 1;
uchar img_r = data & t;
data = data >> 8;
uchar img_g = data & t;
data = data >> 8;
uchar img_b = data & t;
cvSet2D(img, i, j, cvScalar(img_b, img_g, img_r));
}
}
free(h_rgb);
free(h_h);
free(h_s);
free(h_i);
hipFree(d_rgb);
hipFree(d_h);
hipFree(d_s);
hipFree(d_i);
//printf("%d %d\n",sizeof(uchar), sizeof(char));
return true;
}
void GPU(char * path) {
IplImage * Img = cvLoadImage(path, 1);
CvMat * HImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * SImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * IImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
float start = clock();
if (GPU_Rgb2Hsi(Img, HImg, SImg, IImg) == 0)
{
printf("Convert Error!\n");
exit(-1);
}
EqualizeHist(IImg);
if (GPU_Hsi2Rgb(Img, HImg, SImg, IImg) == 0) {
printf("Convert Error!\n");
exit(-1);
}
float end = clock();
printf("time = %fms\n", 1000.0f * (end - start) / CLOCKS_PER_SEC);
cvNamedWindow("GPU_1", 1);
cvShowImage("GPU_1", Img);
cvWaitKey(0);
}
__global__ void g_map(float *d_i, int *d_histogram, int len, float num, float Min) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) {
return;
}
__shared__ int h[256];
if(threadIdx.x < 256)
{
h[threadIdx.x] = d_histogram[threadIdx.x];
}
__syncthreads();
int t = 255.0 * (d_i[tid]) + 0.5;
if (t > 255)
t = 255;
else if (t < 0)
t = 0;
t = h[t];
//printf("%d %d\n", t, h_histogram[t]);
d_i[tid] = (float) (1.0 * t - Min) / num;
}
void new_GPU(char * path) {
IplImage * Img = cvLoadImage(path, 1);
CvMat * HImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * SImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * IImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
float start = clock();
int *d_rgb;
float *d_h;
float *d_s;
float *d_i;
int *d_histogram;
int *h_rgb;
float *h_h;
float *h_s;
float *h_i;
int *h_histogram;
int len = Img->width * Img->height;
hipMalloc((void **) &d_rgb, len * sizeof(int));
hipMalloc((void **) &d_h, len * sizeof(float));
hipMalloc((void **) &d_s, len * sizeof(float));
hipMalloc((void **) &d_i, len * sizeof(float));
hipMalloc((void **) &d_histogram, 256 * sizeof(int));
h_rgb = (int *) malloc(len * sizeof(int));
h_h = (float *) malloc(len * sizeof(float));
h_s = (float *) malloc(len * sizeof(float));
h_i = (float *) malloc(len * sizeof(float));
h_histogram = (int *) malloc(256 * sizeof(int));
hipDeviceProp_t deviceProp;
int deviceNum;
hipGetDevice(&deviceNum);
hipGetDeviceProperties(&deviceProp, deviceNum);
int maxThread = deviceProp.maxThreadsPerBlock;
dim3 thread = dim3(maxThread <= len ? maxThread : len);
dim3 block = dim3((len + maxThread - 1) / maxThread);
// pack the rgb data: the r, g and b bytes of each pixel are stored in one 32-bit word
for (int i = 0; i < Img->height; i++) {
for (int j = 0; j < Img->width; j++) {
uchar *data = (uchar *) Img->imageData + i * Img->widthStep + j * 3;
uchar img_b = (uchar) *data;
data++;
uchar img_g = (uchar) *data;
data++;
uchar img_r = (uchar) *data;
int t = 0;
t |= (img_r);
t |= (img_g << 8);
t |= (img_b << 16);
h_rgb[i * Img->width + j] = t;
}
}
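// Packing example (hypothetical pixel values): for B = 0x56, G = 0x34, R = 0x12 the
// packed word is t = 0x00563412 -- R occupies bits 0-7, G bits 8-15 and B bits 16-23,
// which is the layout g_rgb2hsi and the unpacking loop further below expect.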
//TODO rgb2hsi
hipMemcpy(d_rgb, h_rgb, sizeof(int) * len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( g_rgb2hsi), dim3(block), dim3(thread), 0, 0, d_rgb, d_h, d_s, d_i, len);
hipDeviceSynchronize();
checkLastCudaError();
//TODO EqualizeHist
hipMemcpy(h_i, d_i, sizeof(float) * len, hipMemcpyDeviceToHost);
memset(h_histogram, 0, sizeof(int) * 256);
for (int y = 0; y < Img->height; y++) {
for (int x = 0; x < Img->width; x++) {
int id = y * Img->width + x;
int t = 255.0 * h_i[id] + 0.5;
if (t > 255)
t = 255;
else if (t < 0)
t = 0;
h_histogram[(int) t]++;
}
}
int Min = 1000000000;
for (int i = 1; i < 255; i++) {
h_histogram[i] += h_histogram[i - 1];
if (h_histogram[i] < Min)
Min = h_histogram[i];
//printf("%d\n",h_histogram[i]);
}
//udaMemcpy(d_i, h_i, sizeof(float) * len, hipMemcpyHostToDevice);
hipMemcpy(d_histogram, h_histogram, sizeof(int) * 256, hipMemcpyHostToDevice);
int num = Img->height * Img->width - Min;
hipLaunchKernelGGL(( g_map), dim3(block),dim3(thread), 0, 0, d_i, d_histogram, len, num, Min); // argument order matches g_map(d_i, d_histogram, len, num, Min)
//TODO hsi2rgb
hipLaunchKernelGGL(( g_hsi2rgb), dim3(block), dim3(thread), 0, 0, d_rgb, d_h, d_s, d_i, len, pii);
hipDeviceSynchronize();
checkLastCudaError();
hipMemcpy(h_rgb, d_rgb, sizeof(int) * len, hipMemcpyDeviceToHost);
for (int i = 0; i < Img->height; i++) {
for (int j = 0; j < Img->width; j++) {
int id = i * Img->width + j;
int data = h_rgb[id];
int t = (1 << 8) - 1;
uchar img_r = data & t;
data = data >> 8;
uchar img_g = data & t;
data = data >> 8;
uchar img_b = data & t;
cvSet2D(Img, i, j, cvScalar(img_b, img_g, img_r));
}
}
float end = clock();
free(h_rgb);
free(h_h);
free(h_s);
free(h_i);
free(h_histogram);
hipFree(d_histogram);
hipFree(d_rgb);
hipFree(d_h);
hipFree(d_s);
hipFree(d_i);
printf("time = %fms\n", 1000.0f * (end - start) / CLOCKS_PER_SEC);
cvNamedWindow("GPU_2", 1);
cvShowImage("GPU_2", Img);
cvWaitKey(0);
}
void CPU(char * path) {
IplImage * Img = cvLoadImage(path, 1);
CvMat * HImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * SImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * IImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
float start = clock();
if (Rgb2Hsi(Img, HImg, SImg, IImg) == 0) {
printf("Convert Error!\n");
exit(-1);
}
EqualizeHist(IImg);
if (Hsi2Rgb(Img, HImg, SImg, IImg) == 0)
{
printf("Convert Error!\n");
exit(-1);
}
float end = clock();
printf("time = %fms\n", 1000.0f * (end - start) / CLOCKS_PER_SEC);
cvNamedWindow("CPU", 1);
cvShowImage("CPU", Img);
cvWaitKey(0);
}
int main(int argc, char ** argv) {
CPU("1.jpg");
GPU("1.jpg");
new_GPU("1.jpg");
return 0;
}
|
b3775ea86132900563b80133d06ae18972317953.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <iomanip>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
#include <stdio.h>
#include <time.h>
#include <cmath>
#include <stdio.h>
#include <math.h>
using namespace std;
using namespace cv;
using namespace cv::gpu;
#define msg(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); } while (0)
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
#define malloc2D(name, xDim, yDim, type) do { \
name = (type **)malloc(xDim * sizeof(type *)); \
assert(name != NULL); \
name[0] = (type *)malloc(xDim * yDim * sizeof(type)); \
assert(name[0] != NULL); \
for (size_t i = 1; i < xDim; i++) \
name[i] = name[i-1] + yDim; \
} while (0)
#ifdef __CUDACC__
inline void checkCuda(cudaError_t e) {
if (e != cudaSuccess) {
// cudaGetErrorString() isn't always very helpful. Look up the error
// number in the cudaError enum in driver_types.h in the CUDA includes
// directory for a better explanation.
err("CUDA Error %d: %s\n", e, cudaGetErrorString(e));
}
}
inline void checkLastCudaError() {
checkCuda(cudaGetLastError());
}
#endif
const float pii = asin(1.0) * 2;
int minn(int a, int b, int c) {
return min(a, min(b, c));
}
int Rgb2Hsi(const IplImage* img, CvMat* dataH, CvMat* dataS, CvMat* dataI) {
uchar* data;
// RGB components
int img_r, img_g, img_b;
int min_rgb; // minimum of the three RGB components
// HSI components
float fHue, fSaturation, fIntensity;
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
//data = cvPtr2D(img, i, j, 0);
data = (uchar *) img->imageData + i * img->widthStep + j * 3;
img_b = (uchar) *data;
data++;
img_g = (uchar) *data;
data++;
img_r = (uchar) *data;
// Intensity component, in [0, 1]
fIntensity = (float) ((img_b + img_g + img_r) / 3) / 255;
// find the minimum of the RGB components
float fTemp = img_r < img_g ? img_r : img_g;
min_rgb = fTemp < img_b ? fTemp : img_b;
// Saturation component, in [0, 1]
fSaturation = 1.0f
- (float) (3 * min_rgb) / (img_r + img_g + img_b);
// compute the angle theta
float numerator = (img_r - img_g + img_r - img_b) / 2;
float denominator = sqrt(
pow((float) (img_r - img_g), 2)
+ (img_r - img_b) * (img_g - img_b));
// compute the Hue component
if (denominator != 0) {
float theta = acos(numerator / denominator) * 180 / 3.14;
if (img_b <= img_g) {
fHue = theta;
} else {
fHue = 360 - theta;
}
} else {
fHue = 0;
}
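// The branch above follows the standard RGB -> HSI hue formula:
//   theta = acos( ((R-G) + (R-B)) / (2 * sqrt((R-G)^2 + (R-B)*(G-B))) )
//   H = theta         if B <= G
//   H = 360 - theta   otherwise
// with H expressed in degrees (pi is approximated by 3.14 in the conversion).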
// write the values back
//printf("%f %f %f\n",fHue,fSaturation, fIntensity);
cvmSet(dataH, i, j, fHue);
cvmSet(dataS, i, j, fSaturation);
cvmSet(dataI, i, j, fIntensity);
}
}
return 1;
}
int Hsi2Rgb(IplImage* src, CvMat* dataH, CvMat* dataS, CvMat* dataI) {
uchar iB, iG, iR;
for (int i = 0; i < src->height; i++) {
for (int j = 0; j < src->width; j++) {
// hue H at this pixel
double dH = cvmGet(dataH, i, j);
// saturation S at this pixel
double dS = cvmGet(dataS, i, j);
// intensity I at this pixel
double dI = cvmGet(dataI, i, j);
double dTempB, dTempG, dTempR;
// RG sector (0 <= H < 120)
if (dH < 120 && dH >= 0) {
// convert H to radians
dH = dH * pii / 180;
dTempB = dI * (1 - dS);
dTempR = dI * (1 + (dS * cos(dH)) / cos(pii / 3 - dH));
dTempG = (3 * dI - (dTempR + dTempB));
}
// GB sector (120 <= H < 240)
else if (dH < 240 && dH >= 120) {
dH -= 120;
// convert H to radians
dH = dH * pii / 180;
dTempR = dI * (1 - dS);
dTempG = dI * (1 + dS * cos(dH) / cos(pii / 3 - dH));
dTempB = (3 * dI - (dTempR + dTempG));
}
// BR sector (240 <= H < 360)
else {
dH -= 240;
// convert H to radians
dH = dH * pii / 180;
dTempG = dI * (1 - dS);
dTempB = dI * (1 + (dS * cos(dH)) / cos(pii / 3 - dH));
dTempR = (3 * dI - (dTempG + dTempB));
}
//printf("%f %f %f\n", dTempB, dTempG, dTempR);
if (dTempR > 1.0)
dTempR = 1.0;
if (dTempR < 0)
dTempR = 0.0;
if (dTempG > 1.0)
dTempG = 1.0;
if (dTempG < 0)
dTempG = 0.0;
if (dTempB > 1.0)
dTempB = 1.0;
if (dTempB < 0)
dTempB = 0.0;
iB = (uchar) (dTempB * 255);
iG = (uchar) (dTempG * 255);
iR = (uchar) (dTempR * 255);
cvSet2D(src, i, j, cvScalar(iB, iG, iR));
//offset = src->widthStep * i + j * src->nChannels;
//src->imageData[offset] = iB;
//src->imageData[offset+1] = iG;
//src->imageData[offset+2] = iR;
}
}
return 1;
}
int EqualizeHist(CvMat *pImg) {
int histogram[256];
memset(histogram, 0, sizeof(histogram));
for (int y = 0; y < pImg->rows; y++) {
for (int x = 0; x < pImg->cols; x++) {
int t = 255.0 * cvmGet(pImg, y, x) + 0.5;
if (t > 255)
t = 255;
if (t < 0)
t = 0;
histogram[(int) t]++;
}
}
int Min = 1000000000;
for (int i = 1; i < 255; i++) {
histogram[i] += histogram[i - 1];
if (histogram[i] < Min)
Min = histogram[i];
}
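// At this point histogram[] holds the cumulative distribution (CDF) of the intensity bins
// and Min is the smallest of those accumulated counts. The remapping below is the usual
// histogram-equalization transfer function s = (cdf(v) - cdf_min) / (width*height - cdf_min),
// which stretches the occupied intensity range towards [0, 1].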
int num = pImg->height * pImg->width - Min;
for (int y = 0; y < pImg->rows; y++) {
for (int x = 0; x < pImg->cols; x++) {
int t = 255.0 * (cvmGet(pImg, y, x)) + 0.5;
if (t > 255)
t = 255;
if (t < 0)
t = 0;
t = histogram[t];
//printf("%f ",(float)(t - 1) / num);
cvmSet(pImg, y, x, (float) (1.0 * t - Min) / num);
} //printf("\n");
}
return true;
}
__global__ void g_rgb2hsi(int *g_rgb, float *g_h, float *g_s, float *g_i,
int len) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) {
return;
}
int img_r, img_g, img_b;
int min_rgb;
float fHue, fSaturation, fIntensity;
int data = g_rgb[tid];
int t = (1 << 8) - 1;
img_r = data & t;
data = data >> 8;
img_g = data & t;
data = data >> 8;
img_b = data & t;
fIntensity = (float) ((img_b + img_g + img_r) / 3) / 255;
float fTemp = img_r < img_g ? img_r : img_g;
min_rgb = fTemp < img_b ? fTemp : img_b;
fSaturation = 1.0f - (float) (3 * min_rgb) / (img_r + img_g + img_b);
float numerator = (img_r - img_g + img_r - img_b) / 2;
float denominator = sqrt(
pow((float) (img_r - img_g), 2)
+ (img_r - img_b) * (img_g - img_b));
if (denominator != 0) {
float theta = acos(numerator / denominator) * 180 / 3.14;
if (img_b <= img_g) {
fHue = theta;
} else {
fHue = 360 - theta;
}
} else {
fHue = 0;
}
g_h[tid] = fHue;
g_s[tid] = fSaturation;
g_i[tid] = fIntensity;
}
__global__ void g_hsi2rgb(int *g_rgb, float *g_h, float *g_s, float *g_i,
int len, double pi) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) {
return;
}
uchar iB, iG, iR;
float dH = (float) g_h[tid];
float dS = (float) g_s[tid];
float dI = (float) g_i[tid];
float dTempB, dTempG, dTempR;
if (dH < 120 && dH >= 0) {
// convert H to radians
dH = dH * pi / 180;
dTempB = dI * (1 - dS);
dTempR = dI * (1 + (dS * cos(dH)) / cos(pi / 3 - dH));
dTempG = (3 * dI - (dTempR + dTempB));
}
// GB sector (120 <= H < 240)
else if (dH < 240 && dH >= 120) {
dH -= 120;
// convert H to radians
dH = dH * pi / 180;
dTempR = dI * (1 - dS);
dTempG = dI * (1 + dS * cos(dH) / cos(pi / 3 - dH));
dTempB = (3 * dI - (dTempR + dTempG));
}
// BR sector (240 <= H < 360)
else {
dH -= 240;
// convert H to radians
dH = dH * pi / 180;
dTempG = dI * (1 - dS);
dTempB = dI * (1 + (dS * cos(dH)) / cos(pi / 3 - dH));
dTempR = (3 * dI - (dTempG + dTempB));
}
//printf("%f %f %f\n", dTempB, dTempG, dTempR);
if (dTempR > 1.0)
dTempR = 1.0;
if (dTempR < 0)
dTempR = 0.0;
if (dTempG > 1.0)
dTempG = 1.0;
if (dTempG < 0)
dTempG = 0.0;
if (dTempB > 1.0)
dTempB = 1.0;
if (dTempB < 0)
dTempB = 0.0;
iB = (uchar) (dTempB * 255);
iG = (uchar) (dTempG * 255);
iR = (uchar) (dTempR * 255);
int t = 0;
t |= (iR);
t |= (iG << 8);
t |= (iB << 16);
g_rgb[tid] = t;
}
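// The packed result can be unpacked on the host exactly as the loops in GPU_Hsi2Rgb and
// new_GPU do it: r = t & 0xFF, g = (t >> 8) & 0xFF, b = (t >> 16) & 0xFF.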
bool GPU_Rgb2Hsi(const IplImage* img, CvMat* dataH, CvMat* dataS,
CvMat* dataI) {
int * d_rgb;
float *d_h;
float *d_s;
float *d_i;
int * h_rgb;
float *h_h;
float *h_s;
float *h_i;
int len = img->width * img->height;
cudaMalloc((void **) &d_rgb, len * sizeof(int));
cudaMalloc((void **) &d_h, len * sizeof(float));
cudaMalloc((void **) &d_s, len * sizeof(float));
cudaMalloc((void **) &d_i, len * sizeof(float));
h_rgb = (int *) malloc(len * sizeof(int));
h_h = (float *) malloc(len * sizeof(float));
h_s = (float *) malloc(len * sizeof(float));
h_i = (float *) malloc(len * sizeof(float));
cudaDeviceProp deviceProp;
int deviceNum;
cudaGetDevice(&deviceNum);
cudaGetDeviceProperties(&deviceProp, deviceNum);
int maxThread = deviceProp.maxThreadsPerBlock;
dim3 thread = dim3(maxThread <= len ? maxThread : len);
dim3 block = dim3((len + maxThread - 1) / maxThread);
// pack the rgb data: the r, g and b bytes of each pixel are stored in one 32-bit word
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
uchar *data = (uchar *) img->imageData + i * img->widthStep + j * 3;
uchar img_b = (uchar) *data;
data++;
uchar img_g = (uchar) *data;
data++;
uchar img_r = (uchar) *data;
int t = 0;
t |= (img_r);
t |= (img_g << 8);
t |= (img_b << 16);
h_rgb[i * img->width + j] = t;
}
}
cudaMemcpy(d_rgb, h_rgb, sizeof(int) * len, cudaMemcpyHostToDevice);
g_rgb2hsi<<<block, thread>>>(d_rgb, d_h, d_s, d_i, len);
cudaDeviceSynchronize();
checkLastCudaError();
cudaMemcpy(h_h, d_h, sizeof(float) * len, cudaMemcpyDeviceToHost);
cudaMemcpy(h_s, d_s, sizeof(float) * len, cudaMemcpyDeviceToHost);
cudaMemcpy(h_i, d_i, sizeof(float) * len, cudaMemcpyDeviceToHost);
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
int t = i * img->width + j;
cvmSet(dataH, i, j, h_h[t]);
cvmSet(dataS, i, j, h_s[t]);
cvmSet(dataI, i, j, h_i[t]);
}
}
free(h_rgb);
free(h_h);
free(h_s);
free(h_i);
cudaFree(d_rgb);
cudaFree(d_h);
cudaFree(d_s);
cudaFree(d_i);
return true;
}
bool GPU_Hsi2Rgb(IplImage* img, CvMat* dataH, CvMat* dataS, CvMat* dataI) {
int * d_rgb;
float *d_h;
float *d_s;
float *d_i;
int * h_rgb;
float *h_h;
float *h_s;
float *h_i;
int len = img->width * img->height;
cudaMalloc((void **) &d_rgb, len * sizeof(int));
cudaMalloc((void **) &d_h, len * sizeof(float));
cudaMalloc((void **) &d_s, len * sizeof(float));
cudaMalloc((void **) &d_i, len * sizeof(float));
h_rgb = (int *) malloc(len * sizeof(int));
h_h = (float *) malloc(len * sizeof(float));
h_s = (float *) malloc(len * sizeof(float));
h_i = (float *) malloc(len * sizeof(float));
cudaDeviceProp deviceProp;
int deviceNum;
cudaGetDevice(&deviceNum);
cudaGetDeviceProperties(&deviceProp, deviceNum);
int maxThread = deviceProp.maxThreadsPerBlock;
dim3 thread = dim3(maxThread <= len ? maxThread : len);
dim3 block = dim3((len + maxThread - 1) / maxThread);
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
float dH = cvmGet(dataH, i, j);
float dS = cvmGet(dataS, i, j);
float dI = cvmGet(dataI, i, j);
int t = i * img->width + j;
h_h[t] = dH;
h_s[t] = dS;
h_i[t] = dI;
}
}
cudaMemcpy(d_h, h_h, sizeof(float) * len, cudaMemcpyHostToDevice);
cudaMemcpy(d_s, h_s, sizeof(float) * len, cudaMemcpyHostToDevice);
cudaMemcpy(d_i, h_i, sizeof(float) * len, cudaMemcpyHostToDevice);
g_hsi2rgb<<<block, thread>>>(d_rgb, d_h, d_s, d_i, len, pii);
cudaDeviceSynchronize();
cudaMemcpy(h_rgb, d_rgb, sizeof(int) * len, cudaMemcpyDeviceToHost);
for (int i = 0; i < img->height; i++) {
for (int j = 0; j < img->width; j++) {
int id = i * img->width + j;
int data = h_rgb[id];
int t = (1 << 8) - 1;
uchar img_r = data & t;
data = data >> 8;
uchar img_g = data & t;
data = data >> 8;
uchar img_b = data & t;
cvSet2D(img, i, j, cvScalar(img_b, img_g, img_r));
}
}
free(h_rgb);
free(h_h);
free(h_s);
free(h_i);
cudaFree(d_rgb);
cudaFree(d_h);
cudaFree(d_s);
cudaFree(d_i);
//printf("%d %d\n",sizeof(uchar), sizeof(char));
return true;
}
void GPU(char * path) {
IplImage * Img = cvLoadImage(path, 1);
CvMat * HImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * SImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * IImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
float start = clock();
if (GPU_Rgb2Hsi(Img, HImg, SImg, IImg) == 0)
{
printf("Convert Error!\n");
exit(-1);
}
EqualizeHist(IImg);
if (GPU_Hsi2Rgb(Img, HImg, SImg, IImg) == 0) {
printf("Convert Error!\n");
exit(-1);
}
float end = clock();
printf("time = %fms\n", 1000.0f * (end - start) / CLOCKS_PER_SEC);
cvNamedWindow("GPU_1", 1);
cvShowImage("GPU_1", Img);
cvWaitKey(0);
}
__global__ void g_map(float *d_i, int *d_histogram, int len, float num, float Min) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) {
return;
}
__shared__ int h[256];
if(threadIdx.x < 256)
{
h[threadIdx.x] = d_histogram[threadIdx.x];
}
__syncthreads();
int t = 255.0 * (d_i[tid]) + 0.5;
if (t > 255)
t = 255;
else if (t < 0)
t = 0;
t = h[t];
//printf("%d %d\n", t, h_histogram[t]);
d_i[tid] = (float) (1.0 * t - Min) / num;
}
void new_GPU(char * path) {
IplImage * Img = cvLoadImage(path, 1);
CvMat * HImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * SImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * IImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
float start = clock();
int *d_rgb;
float *d_h;
float *d_s;
float *d_i;
int *d_histogram;
int *h_rgb;
float *h_h;
float *h_s;
float *h_i;
int *h_histogram;
int len = Img->width * Img->height;
cudaMalloc((void **) &d_rgb, len * sizeof(int));
cudaMalloc((void **) &d_h, len * sizeof(float));
cudaMalloc((void **) &d_s, len * sizeof(float));
cudaMalloc((void **) &d_i, len * sizeof(float));
cudaMalloc((void **) &d_histogram, 256 * sizeof(int));
h_rgb = (int *) malloc(len * sizeof(int));
h_h = (float *) malloc(len * sizeof(float));
h_s = (float *) malloc(len * sizeof(float));
h_i = (float *) malloc(len * sizeof(float));
h_histogram = (int *) malloc(256 * sizeof(int));
cudaDeviceProp deviceProp;
int deviceNum;
cudaGetDevice(&deviceNum);
cudaGetDeviceProperties(&deviceProp, deviceNum);
int maxThread = deviceProp.maxThreadsPerBlock;
dim3 thread = dim3(maxThread <= len ? maxThread : len);
dim3 block = dim3((len + maxThread - 1) / maxThread);
// pack the rgb data: the r, g and b bytes of each pixel are stored in one 32-bit word
for (int i = 0; i < Img->height; i++) {
for (int j = 0; j < Img->width; j++) {
uchar *data = (uchar *) Img->imageData + i * Img->widthStep + j * 3;
uchar img_b = (uchar) *data;
data++;
uchar img_g = (uchar) *data;
data++;
uchar img_r = (uchar) *data;
int t = 0;
t |= (img_r);
t |= (img_g << 8);
t |= (img_b << 16);
h_rgb[i * Img->width + j] = t;
}
}
//TODO rgb2hsi
cudaMemcpy(d_rgb, h_rgb, sizeof(int) * len, cudaMemcpyHostToDevice);
g_rgb2hsi<<<block, thread>>>(d_rgb, d_h, d_s, d_i, len);
cudaDeviceSynchronize();
checkLastCudaError();
//TODO EqualizeHist
cudaMemcpy(h_i, d_i, sizeof(float) * len, cudaMemcpyDeviceToHost);
memset(h_histogram, 0, sizeof(int) * 256);
for (int y = 0; y < Img->height; y++) {
for (int x = 0; x < Img->width; x++) {
int id = y * Img->width + x;
int t = 255.0 * h_i[id] + 0.5;
if (t > 255)
t = 255;
else if (t < 0)
t = 0;
h_histogram[(int) t]++;
}
}
int Min = 1000000000;
for (int i = 1; i < 255; i++) {
h_histogram[i] += h_histogram[i - 1];
if (h_histogram[i] < Min)
Min = h_histogram[i];
//printf("%d\n",h_histogram[i]);
}
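// Design note: the 256-bin histogram/CDF is cheap to build, so it is computed on the CPU
// from the copied-back intensity plane and only the finished CDF is uploaded for the g_map
// kernel; a fully GPU-side variant could instead build the histogram with atomics.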
//udaMemcpy(d_i, h_i, sizeof(float) * len, cudaMemcpyHostToDevice);
cudaMemcpy(d_histogram, h_histogram, sizeof(int) * 256, cudaMemcpyHostToDevice);
int num = Img->height * Img->width - Min;
g_map<<<block,thread>>>(d_i, d_histogram, len, num, Min); // argument order matches g_map(d_i, d_histogram, len, num, Min)
//TODO hsi2rgb
g_hsi2rgb<<<block, thread>>>(d_rgb, d_h, d_s, d_i, len, pii);
cudaDeviceSynchronize();
checkLastCudaError();
cudaMemcpy(h_rgb, d_rgb, sizeof(int) * len, cudaMemcpyDeviceToHost);
for (int i = 0; i < Img->height; i++) {
for (int j = 0; j < Img->width; j++) {
int id = i * Img->width + j;
int data = h_rgb[id];
int t = (1 << 8) - 1;
uchar img_r = data & t;
data = data >> 8;
uchar img_g = data & t;
data = data >> 8;
uchar img_b = data & t;
cvSet2D(Img, i, j, cvScalar(img_b, img_g, img_r));
}
}
float end = clock();
free(h_rgb);
free(h_h);
free(h_s);
free(h_i);
free(h_histogram);
cudaFree(d_histogram);
cudaFree(d_rgb);
cudaFree(d_h);
cudaFree(d_s);
cudaFree(d_i);
printf("time = %fms\n", 1000.0f * (end - start) / CLOCKS_PER_SEC);
cvNamedWindow("GPU_2", 1);
cvShowImage("GPU_2", Img);
cvWaitKey(0);
}
void CPU(char * path) {
IplImage * Img = cvLoadImage(path, 1);
CvMat * HImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * SImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
CvMat * IImg = cvCreateMat(Img->height, Img->width, CV_32FC1);
float start = clock();
if (Rgb2Hsi(Img, HImg, SImg, IImg) == 0) {
printf("Convert Error!\n");
exit(-1);
}
EqualizeHist(IImg);
if (Hsi2Rgb(Img, HImg, SImg, IImg) == 0)
{
printf("Convert Error!\n");
exit(-1);
}
float end = clock();
printf("time = %fms\n", 1000.0f * (end - start) / CLOCKS_PER_SEC);
cvNamedWindow("CPU", 1);
cvShowImage("CPU", Img);
cvWaitKey(0);
}
int main(int argc, char ** argv) {
CPU("1.jpg");
GPU("1.jpg");
new_GPU("1.jpg");
return 0;
}
|
7e218232f6dcba8586dad8edf7c947c7073cca6b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author: Deren Kong (Kong.325)
CSE5441 lab4
Description: a simple CUDA program implementing producer and consumer functions
Compile:
qsub -I -l walltime=00:59:00 -l nodes=1:gpus=1,mem=4gb -A PAS1421
sh ./fire_cuda.sh
Submitted on : 11/27/2018
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <string.h>
#include <time.h>
#include "pc4.h"
#define EMPTY 9999
__device__ u_int16_t transformA(u_int16_t input_val);
__device__ u_int16_t transformB(u_int16_t input_val);
__device__ u_int16_t transformC(u_int16_t input_val);
__device__ u_int16_t transformD(u_int16_t input_val);
//kernel for producer
__global__ void transform(char* cmd, u_int16_t *pre, u_int16_t *aft) {
int id = threadIdx.x;
if (cmd[id] == 'A') {
aft[id] = transformA(pre[id]);
}
if (cmd[id] == 'B') {
aft[id] = transformB(pre[id]);
}
if (cmd[id] == 'C') {
aft[id] = transformC(pre[id]);
}
if (cmd[id] == 'D') {
aft[id] = transformD(pre[id]);
}
}
//kernel for consumer
__global__ void transform_print(char* cmd, u_int16_t *pre, u_int16_t *aft) {
int id = threadIdx.x;
if (cmd[id] == 'A') {
aft[id] = transformA(pre[id]);
}
if (cmd[id] == 'B') {
aft[id] = transformB(pre[id]);
}
if (cmd[id] == 'C') {
aft[id] = transformC(pre[id]);
}
if (cmd[id] == 'D') {
aft[id] = transformD(pre[id]);
}
printf("Q:%d %c %hd %hd\n",id, cmd[id], pre[id], aft[id]);
}
queue *init_queue() {
queue * queue_list;
queue_list = (queue*) malloc (sizeof(queue));
for (int i = 0; i < N; i++) {
queue_list->work_queue[i].cmd = ' ';
queue_list->work_queue[i].key = EMPTY;
}
queue_list->empty = true;
queue_list->full = false;
queue_list->head = 0;
queue_list->tail = 0;
return queue_list;
}
void destroy_queue(queue *q) {
free(q);
}
void add_queue(queue *q, work_entry element) {
q->work_queue[q->tail] = element;
q->tail++;
if (q->tail == N) {
q->tail = 0;
}
if (q->tail == q->head) {
q->full = true;
}
q->empty = false;
}
void del_queue(queue *q, work_entry *out) {
*out = q->work_queue[q->head];
q->head++;
if (q->head == N) {
q->head = 0;
}
if (q->head == q->tail) {
q->empty = true;
}
q->full = false;
}
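// Ring-buffer behaviour, for illustration with N = 4: after four add_queue() calls the
// tail wraps back to 0, equals head, and full becomes true; a single del_queue() then
// advances head to 1 and clears full again.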
//the producer sends all the input data to cuda and, when it has finished,
//fires the consumer
void producer(queue *queue_list, int *run_time, double *run_time2) {
char buffer[20];
char temp_key[10];
char cmd = ' ';
while(fgets(buffer, 20, stdin) != NULL) {
sscanf(buffer, "%c %s", &cmd, temp_key);
u_int16_t key = (u_int16_t)(atoi(temp_key));
work_entry buf;
if (cmd == 'X') {
break;
}
if (cmd != 'A' && cmd != 'B' && cmd != 'C' && cmd != 'D') {
continue;
}
if (key <= 1000) {
buf.cmd = cmd;
buf.key = key;
add_queue(queue_list, buf);
}
}
printf ("finish reading!\n");
int num_blocks = 1;
int num_th_per_blk = queue_list->tail;
// if N is 2048 or larger, set numGrid = 10 and numBlock = N/10;
// if N / 10 has a remainder, round the thread count up (padding the last block)
if (N >= 2048) {
num_blocks = 10;
num_th_per_blk = ceil((double)num_th_per_blk / 10.0);
}
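// For illustration: if the queue held 4000 entries and N >= 2048, this yields
// num_blocks = 10 and num_th_per_blk = ceil(4000 / 10.0) = 400, i.e. 10 * 400 = 4000 threads.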
clock_t t;
t = clock();
time_t timer;
timer = time(NULL);
char * pre_cmd_h, *pre_cmd_d;
u_int16_t *pre_key_h, *aft_key_h, *pre_key_d, *aft_key_d;
size_t memSize_cmd = num_blocks*num_th_per_blk*sizeof(char);
size_t memSize_key = num_blocks*num_th_per_blk*sizeof(u_int16_t);
pre_cmd_h = (char*) malloc(memSize_cmd);
pre_key_h = (u_int16_t*) malloc(memSize_key);
aft_key_h = (u_int16_t*)malloc(memSize_key);
// save all keys and cmds to arrays
for (int i = 0; i < num_th_per_blk; i++) {
pre_cmd_h[i] = queue_list->work_queue[i].cmd;
pre_key_h[i] = queue_list->work_queue[i].key;
}
hipMalloc((void**)&pre_cmd_d, memSize_cmd);
hipMalloc((void**)&pre_key_d, memSize_key);
hipMalloc((void**)&aft_key_d, memSize_key);
hipMemcpy(pre_cmd_d, pre_cmd_h, memSize_cmd, hipMemcpyHostToDevice);
hipMemcpy(pre_key_d, pre_key_h, memSize_key, hipMemcpyHostToDevice);
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_th_per_blk);
hipLaunchKernelGGL(( transform) , dim3(dimGrid), dim3(dimBlock) , 0, 0, pre_cmd_d, pre_key_d, aft_key_d);
hipMemcpy(pre_cmd_h, pre_cmd_d, memSize_cmd, hipMemcpyDeviceToHost);
hipMemcpy(pre_key_h, pre_key_d, memSize_key, hipMemcpyDeviceToHost);
hipMemcpy(aft_key_h, aft_key_d, memSize_key, hipMemcpyDeviceToHost);
hipFree(pre_cmd_d);
hipFree(pre_key_d);
hipFree(aft_key_d);
consumer(pre_cmd_h, aft_key_h, num_th_per_blk);
time_t cur = time(NULL);
*(run_time2) += difftime(cur,timer);
t = clock() - t;
*(run_time) += t;
}
// the consumer transforms the encoded keys
// and prints the results to stdout
void consumer(char* cmd, u_int16_t* key, int size) {
char* cmd_d;
u_int16_t *key_d, *aft_key_h, *aft_key_d;
int num_blocks = 1;
int num_th_per_blk = size;
// same adjustment as in the producer, used when dimGrid and dimBlock were changed there
if (N >= 2048) {
num_blocks = 10;
}
size_t memSize_cmd = num_blocks*num_th_per_blk*sizeof(char);
size_t memSize_key = num_blocks*num_th_per_blk*sizeof(u_int16_t);
hipMalloc((void**) &cmd_d, memSize_cmd);
hipMalloc((void**) &key_d, memSize_key);
hipMalloc((void**) &aft_key_d, memSize_key);
hipMemcpy(key_d, key, memSize_key, hipMemcpyHostToDevice);
hipMemcpy(cmd_d, cmd, memSize_cmd, hipMemcpyHostToDevice);
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_th_per_blk);
hipLaunchKernelGGL(( transform_print) , dim3(dimGrid), dim3(dimBlock) , 0, 0, cmd_d, key_d, aft_key_d);
hipFree(cmd_d);
hipFree(key_d);
hipFree(aft_key_d);
}
int main(int argc, char* argv[]) {
queue * queue_list = init_queue();
if (queue_list == NULL) {
printf("Failed to init!\n");
}
int run_time = 0;
double run_time2 = 0.0;
producer(queue_list, &run_time, &run_time2);
printf("The total runtime of producer and consumer is %d clicks (%f seconds).\n", (run_time),((float )run_time)/(CLOCKS_PER_SEC));
printf("The total runtime2 is %lf seconds.\n", (run_time2));
destroy_queue(queue_list);
return 0;
}
|
7e218232f6dcba8586dad8edf7c947c7073cca6b.cu
|
/*
Author: Deren Kong (Kong.325)
CSE5441 lab4
Description: a simple CUDA program implementing producer and consumer functions
Compile:
qsub -I -l walltime=00:59:00 -l nodes=1:gpus=1,mem=4gb -A PAS1421
sh ./fire_cuda.sh
Submitted on : 11/27/2018
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <string.h>
#include <time.h>
#include "pc4.h"
#define EMPTY 9999
__device__ u_int16_t transformA(u_int16_t input_val);
__device__ u_int16_t transformB(u_int16_t input_val);
__device__ u_int16_t transformC(u_int16_t input_val);
__device__ u_int16_t transformD(u_int16_t input_val);
//kernel for producer
__global__ void transform(char* cmd, u_int16_t *pre, u_int16_t *aft) {
int id = threadIdx.x;
if (cmd[id] == 'A') {
aft[id] = transformA(pre[id]);
}
if (cmd[id] == 'B') {
aft[id] = transformB(pre[id]);
}
if (cmd[id] == 'C') {
aft[id] = transformC(pre[id]);
}
if (cmd[id] == 'D') {
aft[id] = transformD(pre[id]);
}
}
//kernel for consumer
__global__ void transform_print(char* cmd, u_int16_t *pre, u_int16_t *aft) {
int id = threadIdx.x;
if (cmd[id] == 'A') {
aft[id] = transformA(pre[id]);
}
if (cmd[id] == 'B') {
aft[id] = transformB(pre[id]);
}
if (cmd[id] == 'C') {
aft[id] = transformC(pre[id]);
}
if (cmd[id] == 'D') {
aft[id] = transformD(pre[id]);
}
printf("Q:%d %c %hd %hd\n",id, cmd[id], pre[id], aft[id]);
}
queue *init_queue() {
queue * queue_list;
queue_list = (queue*) malloc (sizeof(queue));
for (int i = 0; i < N; i++) {
queue_list->work_queue[i].cmd = ' ';
queue_list->work_queue[i].key = EMPTY;
}
queue_list->empty = true;
queue_list->full = false;
queue_list->head = 0;
queue_list->tail = 0;
return queue_list;
}
void destroy_queue(queue *q) {
free(q);
}
void add_queue(queue *q, work_entry element) {
q->work_queue[q->tail] = element;
q->tail++;
if (q->tail == N) {
q->tail = 0;
}
if (q->tail == q->head) {
q->full = true;
}
q->empty = false;
}
void del_queue(queue *q, work_entry *out) {
*out = q->work_queue[q->head];
q->head++;
if (q->head == N) {
q->head = 0;
}
if (q->head == q->tail) {
q->empty = true;
}
q->full = false;
}
//the producer sends all the input data to cuda and, when it has finished,
//fires the consumer
void producer(queue *queue_list, int *run_time, double *run_time2) {
char buffer[20];
char temp_key[10];
char cmd = ' ';
while(fgets(buffer, 20, stdin) != NULL) {
sscanf(buffer, "%c %s", &cmd, temp_key);
u_int16_t key = (u_int16_t)(atoi(temp_key));
work_entry buf;
if (cmd == 'X') {
break;
}
if (cmd != 'A' && cmd != 'B' && cmd != 'C' && cmd != 'D') {
continue;
}
if (key <= 1000) {
buf.cmd = cmd;
buf.key = key;
add_queue(queue_list, buf);
}
}
printf ("finish reading!\n");
int num_blocks = 1;
int num_th_per_blk = queue_list->tail;
// if N is 2048 or larger, set numGrid = 10 and numBlock = N/10;
// if N / 10 has a remainder, round the thread count up (padding the last block)
if (N >= 2048) {
num_blocks = 10;
num_th_per_blk = ceil((double)num_th_per_blk / 10.0);
}
clock_t t;
t = clock();
time_t timer;
timer = time(NULL);
char * pre_cmd_h, *pre_cmd_d;
u_int16_t *pre_key_h, *aft_key_h, *pre_key_d, *aft_key_d;
size_t memSize_cmd = num_blocks*num_th_per_blk*sizeof(char);
size_t memSize_key = num_blocks*num_th_per_blk*sizeof(u_int16_t);
pre_cmd_h = (char*) malloc(memSize_cmd);
pre_key_h = (u_int16_t*) malloc(memSize_key);
aft_key_h = (u_int16_t*)malloc(memSize_key);
// save all keys and cmds to arrays
for (int i = 0; i < num_th_per_blk; i++) {
pre_cmd_h[i] = queue_list->work_queue[i].cmd;
pre_key_h[i] = queue_list->work_queue[i].key;
}
cudaMalloc((void**)&pre_cmd_d, memSize_cmd);
cudaMalloc((void**)&pre_key_d, memSize_key);
cudaMalloc((void**)&aft_key_d, memSize_key);
cudaMemcpy(pre_cmd_d, pre_cmd_h, memSize_cmd, cudaMemcpyHostToDevice);
cudaMemcpy(pre_key_d, pre_key_h, memSize_key, cudaMemcpyHostToDevice);
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_th_per_blk);
transform <<< dimGrid, dimBlock >>>(pre_cmd_d, pre_key_d, aft_key_d);
cudaMemcpy(pre_cmd_h, pre_cmd_d, memSize_cmd, cudaMemcpyDeviceToHost);
cudaMemcpy(pre_key_h, pre_key_d, memSize_key, cudaMemcpyDeviceToHost);
cudaMemcpy(aft_key_h, aft_key_d, memSize_key, cudaMemcpyDeviceToHost);
cudaFree(pre_cmd_d);
cudaFree(pre_key_d);
cudaFree(aft_key_d);
consumer(pre_cmd_h, aft_key_h, num_th_per_blk);
time_t cur = time(NULL);
*(run_time2) += difftime(cur,timer);
t = clock() - t;
*(run_time) += t;
}
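// Note the round trip here: the producer copies the transformed keys back to the host
// and consumer() below re-uploads both the commands and those keys before launching
// its printing kernel.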
// the consumer transforms the encoded keys
// and prints the results to stdout
void consumer(char* cmd, u_int16_t* key, int size) {
char* cmd_d;
u_int16_t *key_d, *aft_key_h, *aft_key_d;
int num_blocks = 1;
int num_th_per_blk = size;
// same adjustment as in the producer, used when dimGrid and dimBlock were changed there
if (N >= 2048) {
num_blocks = 10;
}
size_t memSize_cmd = num_blocks*num_th_per_blk*sizeof(char);
size_t memSize_key = num_blocks*num_th_per_blk*sizeof(u_int16_t);
cudaMalloc((void**) &cmd_d, memSize_cmd);
cudaMalloc((void**) &key_d, memSize_key);
cudaMalloc((void**) &aft_key_d, memSize_key);
cudaMemcpy(key_d, key, memSize_key, cudaMemcpyHostToDevice);
cudaMemcpy(cmd_d, cmd, memSize_cmd, cudaMemcpyHostToDevice);
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_th_per_blk);
transform_print <<< dimGrid, dimBlock >>>(cmd_d, key_d, aft_key_d);
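// transform_print reports its results via device-side printf; the host never copies
// aft_key_d back, and the printf output is flushed at the next synchronization point
// (at the latest when the CUDA context is destroyed at program exit).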
cudaFree(cmd_d);
cudaFree(key_d);
cudaFree(aft_key_d);
}
int main(int argc, char* argv[]) {
queue * queue_list = init_queue();
if (queue_list == NULL) {
printf("Failed to init!\n");
}
int run_time = 0;
double run_time2 = 0.0;
producer(queue_list, &run_time, &run_time2);
printf("The total runtime of producer and consumer is %d clicks (%f seconds).\n", (run_time),((float )run_time)/(CLOCKS_PER_SEC));
printf("The total runtime2 is %lf seconds.\n", (run_time2));
destroy_queue(queue_list);
return 0;
}
|
cf4d403ac5716f4e038cb6269c5fa0853eeb856e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file
* Copyright (c) 2011-2020, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_decoder.h"
#include <libgpujpeg/gpujpeg_util.h>
/**
* Entry of pre-built Huffman fast-decoding table.
*/
struct gpujpeg_table_huffman_decoder_entry {
int value_nbits;
};
/**
* 4 pre-built tables for faster Huffman decoding (codewords up-to 16 bit length):
* - 0x00000 to 0x0ffff: luminance DC table
* - 0x10000 to 0x1ffff: luminance AC table
* - 0x20000 to 0x2ffff: chrominance DC table
* - 0x30000 to 0x3ffff: chrominance AC table
*
* Each entry consists of:
* - Number of bits of code corresponding to this entry (0 - 16, both inclusive) - bits 4 to 8
* - Number of run-length coded zeros before currently decoded coefficient + 1 (1 - 64, both inclusive) - bits 9 to 15
* - Number of bits representing the value of currently decoded coefficient (0 - 15, both inclusive) - bits 0 to 3
* @code
* bit #: 15 9 8 4 3 0
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* value: | RLE zero count | code bit size | value bit size|
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* @endcode
*/
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_full[4 * (1 << 16)];
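// An entry can be unpacked with plain shifts and masks, mirroring what
// gpujpeg_huffman_gpu_decoder_get_coefficient() does further below:
//   value_nbits    = entry & 0xF;
//   code_nbits     = (entry >> 4) & 0x1F;
//   rle_zero_count = entry >> 9;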
/** Number of code bits to be checked first (with high chance for the code to fit into this number of bits). */
#define QUICK_CHECK_BITS 10
#define QUICK_TABLE_ITEMS (4 * (1 << QUICK_CHECK_BITS))
// TODO: try to tweak QUICK table size and memory space
/** Table with same format as the full table, except that all-zero-entry means that the full table should be consulted. */
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick[QUICK_TABLE_ITEMS];
/** Same table as above, but copied into constant memory */
__constant__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick_const[QUICK_TABLE_ITEMS];
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_decoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
// /**
// * Fill more bit to current get buffer
// *
// * @param get_bits
// * @param get_buff
// * @param data
// * @param data_size
// * @return void
// */
// __device__ inline void
// gpujpeg_huffman_gpu_decoder_decode_fill_bit_buffer(int & get_bits, int & get_buff, uint8_t* & data, int & data_size)
// {
// while ( get_bits < 25 ) {
// //Are there some data?
// if( data_size > 0 ) {
// // Attempt to read a byte
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// unsigned char uc = *data++;
// data_size--;
//
// // If it's 0xFF, check and discard stuffed zero byte
// if ( uc == 0xFF ) {
// while ( uc == 0xFF ) {
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// uc = *data++;
// data_size--;
// }
//
// if ( uc == 0 ) {
// // Found FF/00, which represents an FF data byte
// uc = 0xFF;
// } else {
// // There should be enough bits still left in the data segment;
// // if so, just break out of the outer while loop.
// //if (m_nGetBits >= nbits)
// if ( get_bits >= 0 )
// break;
// }
// }
//
// get_buff = (get_buff << 8) | ((int) uc);
// get_bits += 8;
// }
// else
// break;
// }
// }
/**
* Loads at least specified number of bits into the register
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_load_bits(
const unsigned int required_bit_count, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx
) {
// Add bytes until have enough
while(r_bit_count < required_bit_count) {
// Load byte value and possibly skip next stuffed byte if loaded byte's value is 0xFF
const uint8_t byte_value = ((const uint8_t*)s_byte)[s_byte_idx++];
if((uint8_t)0xFF == byte_value) {
s_byte_idx++;
}
// Add newly loaded byte to the buffer, updating bit count
r_bit = (r_bit << 8) + byte_value;
r_bit_count += 8;
}
}
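// JPEG byte stuffing example: in the entropy-coded segment a literal 0xFF data byte is
// always followed by a stuffed 0x00, so the extra s_byte_idx++ above turns the stream
// bytes FF 00 D5 into the data bytes FF D5.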
/**
* Get bits
*
* @param nbits Number of bits to get
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return bits
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_get_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// update remaining bit count
r_bit_count -= nbits;
// return bits
return (r_bit >> r_bit_count) & ((1 << nbits) - 1);
}
/**
* Gets bits without removing them from the buffer.
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_peek_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return bits
return (r_bit >> (r_bit_count - nbits)) & ((1 << nbits) - 1);
}
/**
* Removes some bits from the buffer (assumes that they are there)
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_discard_bits(const unsigned int nb, unsigned int, unsigned int & r_bit_count) {
r_bit_count -= nb;
}
/**
* Special Huffman decode:
* (1) For codes with length > 8
* (2) For codes with length < 8 while data is finished
*
* @param table
* @param min_bits
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_special_decode(
const struct gpujpeg_table_huffman_decoder* const table, int min_bits, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx)
{
// HUFF_DECODE has determined that the code is at least min_bits
// bits long, so fetch that many bits in one swoop.
int code = gpujpeg_huffman_gpu_decoder_get_bits(min_bits, r_bit, r_bit_count, s_byte, s_byte_idx);
// Collect the rest of the Huffman code one bit at a time.
// This is per Figure F.16 in the JPEG spec.
int l = min_bits;
while ( code > table->maxcode[l] ) {
code <<= 1;
code |= gpujpeg_huffman_gpu_decoder_get_bits(1, r_bit, r_bit_count, s_byte, s_byte_idx);
l++;
}
// With garbage input we may reach the sentinel value l = 17.
if ( l > 16 ) {
// Fake a zero as the safest result
return 0;
}
return table->huffval[table->valptr[l] + (int)(code - table->mincode[l])];
}
/**
* To find dc or ac value according to code and its bit length s
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_value_from_category(int nbits, int code)
{
// TODO: try to replace with __constant__ table lookup
return code < ((1 << nbits) >> 1) ? (code + ((-1) << nbits) + 1) : code;
// // Method 1:
// // On some machines, a shift and add will be faster than a table lookup.
// // #define HUFF_EXTEND(x,s) \
// // ((x)< (1<<((s)-1)) ? (x) + (((-1)<<(s)) + 1) : (x))
//
// // Method 2: Table lookup
// // If (offset < half[category]), then value is below zero
// // Otherwise, value is above zero, and just the offset
// // entry n is 2**(n-1)
// const int half[16] = {
// 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040,
// 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000
// };
//
// //start[i] is the starting value in this category; surely it is below zero
// // entry n is (-1 << n) + 1
// const int start[16] = {
// 0, ((-1)<<1) + 1, ((-1)<<2) + 1, ((-1)<<3) + 1, ((-1)<<4) + 1,
// ((-1)<<5) + 1, ((-1)<<6) + 1, ((-1)<<7) + 1, ((-1)<<8) + 1,
// ((-1)<<9) + 1, ((-1)<<10) + 1, ((-1)<<11) + 1, ((-1)<<12) + 1,
// ((-1)<<13) + 1, ((-1)<<14) + 1, ((-1)<<15) + 1
// };
//
// return (code < half[nbits]) ? (code + start[nbits]) : code;
}
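// Worked example for nbits = 3: received codes 0..3 decode to the negative values -7..-4
// (e.g. code 2 -> 2 + ((-1)<<3) + 1 = -5), while codes 4..7 already equal the coefficient value.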
/**
* Decodes next coefficient, updating its output index
*
* @param table
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_get_coefficient(
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const unsigned int table_offset, unsigned int & coefficient_idx)
{
// Peek next 16 bits and use them as an index into decoder table to find all the info.
const unsigned int table_idx = table_offset + gpujpeg_huffman_gpu_decoder_peek_bits(16, r_bit, r_bit_count, s_byte, s_byte_idx);
// Try the quick table first (use the full table only if the quick lookup did not succeed)
unsigned int packed_info = gpujpeg_huffman_gpu_decoder_tables_quick_const[table_idx >> (16 - QUICK_CHECK_BITS)];
if(0 == packed_info) {
packed_info = gpujpeg_huffman_gpu_decoder_tables_full[table_idx];
}
// remove the right number of bits from the bit buffer
gpujpeg_huffman_gpu_decoder_discard_bits((packed_info >> 4) & 0x1F, r_bit, r_bit_count);
// update coefficient index by skipping run-length encoded zeros
coefficient_idx += packed_info >> 9;
// read coefficient bits and decode the coefficient from them
const unsigned int value_nbits = packed_info & 0xF;
const unsigned int value_code = gpujpeg_huffman_gpu_decoder_get_bits(value_nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return decoded coefficient
return gpujpeg_huffman_gpu_decoder_value_from_category(value_nbits, value_code);
}
/**
* Decode one 8x8 block
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_block(
int & dc, int16_t* const data_output, const unsigned int dc_table_offset, const unsigned int ac_table_offset,
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const uint4* & d_byte, unsigned int & d_byte_chunk_count)
{
// TODO: try unified decoding of DC/AC coefficients
// Index of next coefficient to be decoded (in ZIG-ZAG order)
unsigned int coefficient_idx = 0;
// Section F.2.2.1: decode the DC coefficient difference
// Get the coefficient value (using DC coding table)
int dc_coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, dc_table_offset, coefficient_idx);
// Convert DC difference to actual value, update last_dc_val
dc = dc_coefficient_value += dc;
// Output the DC coefficient (assumes gpujpeg_natural_order[0] = 0)
// TODO: try to skip saving of zero coefficients
data_output[0] = dc_coefficient_value;
// TODO: error check: coefficient_idx must still be 0 in valid codestreams
coefficient_idx = 1;
// Section F.2.2.2: decode the AC coefficients
// Since zeroes are skipped, output area must be cleared beforehand
do {
// Possibly load more bytes into shared buffer from global memory
if(s_byte_idx >= 16) {
// Move remaining bytes to begin and update index of next byte
s_byte[0] = s_byte[1];
s_byte_idx -= 16;
// Load another byte chunk from global memory only if there is one
if(d_byte_chunk_count) {
s_byte[1] = *(d_byte++);
d_byte_chunk_count--;
}
}
// decode next coefficient, updating its destination index
const int coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, ac_table_offset, coefficient_idx);
// stop with this block if have all coefficients
if(coefficient_idx > 64) {
break;
}
// save the coefficient TODO: try to omit saving 0 coefficients
data_output[gpujpeg_huffman_gpu_decoder_order_natural[coefficient_idx - 1]] = coefficient_value;
} while(coefficient_idx < 64);
return 0;
}
/**
* Huffman decoder kernel
*
* @return void
*/
template <bool SINGLE_COMP, int THREADS_PER_TBLOCK>
__global__ void
#if __CUDA_ARCH__ < 200
__launch_bounds__(THREADS_PER_TBLOCK, 2)
#else
__launch_bounds__(THREADS_PER_TBLOCK, 6)
#endif
gpujpeg_huffman_decoder_decode_kernel(
struct gpujpeg_component* d_component,
struct gpujpeg_segment* d_segment,
int comp_count,
int segment_count,
uint8_t* d_data_compressed,
const uint64_t* d_block_list,
int16_t* d_data_quantized
) {
int segment_index = blockIdx.x * THREADS_PER_TBLOCK + threadIdx.x;
if ( segment_index >= segment_count )
return;
struct gpujpeg_segment* segment = &d_segment[segment_index];
// Byte buffers in shared memory
__shared__ uint4 s_byte_all[2 * THREADS_PER_TBLOCK]; // 32 bytes per thread
uint4 * const s_byte = s_byte_all + 2 * threadIdx.x;
// Last DC coefficient values TODO: try to move into shared memory
int dc[GPUJPEG_MAX_COMPONENT_COUNT];
for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
dc[comp] = 0;
// Get aligned compressed data chunk pointer and load first 2 chunks of the data
const unsigned int d_byte_begin_idx = segment->data_compressed_index;
const unsigned int d_byte_begin_idx_aligned = d_byte_begin_idx & ~15; // loading 16byte chunks
const uint4* d_byte = (uint4*)(d_data_compressed + d_byte_begin_idx_aligned);
// Get number of remaining global memory byte chunks (not to read bytes out of buffer)
const unsigned int d_byte_end_idx_aligned = (d_byte_begin_idx + segment->data_compressed_size + 15) & ~15;
unsigned int d_byte_chunk_count = (d_byte_end_idx_aligned - d_byte_begin_idx_aligned) / 16;
// Load first 2 chunks of compressed data into the shared memory buffer and remember index of first code byte (skipping bytes read due to alignment)
s_byte[0] = d_byte[0];
s_byte[1] = d_byte[1];
d_byte += 2;
d_byte_chunk_count = max(d_byte_chunk_count, 2) - 2;
unsigned int s_byte_idx = d_byte_begin_idx - d_byte_begin_idx_aligned;
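// Alignment example (illustrative numbers): data_compressed_index = 37 gives an aligned
// start of 32 and s_byte_idx = 5; with data_compressed_size = 20 the aligned end is
// (37 + 20 + 15) & ~15 = 64, i.e. two 16-byte chunks, both of which were just preloaded.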
// bits loaded into the register and their count
unsigned int r_bit_count = 0;
unsigned int r_bit = 0; // LSB-aligned
// Non-interleaving mode
if ( SINGLE_COMP ) {
// Get component for current scan
const struct gpujpeg_component* const component = d_component + segment->scan_index;
// Get huffman tables offset
const unsigned int dc_table_offset = component->dc_huff_idx * 0x20000;
const unsigned int ac_table_offset = component->ac_huff_idx * 0x20000 + 0x10000;
// Size of MCUs in this segment's component
const int component_mcu_size = component->mcu_size;
// Pointer to first MCU's output block
int16_t* block = component->d_data_quantized + segment->scan_segment_index * component->segment_mcu_count * component_mcu_size;
// Encode MCUs in segment
for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
// Encode 8x8 block
if ( gpujpeg_huffman_gpu_decoder_decode_block(dc[0], block, dc_table_offset, ac_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count) != 0 )
break;
// advance to next block
block += component_mcu_size;
}
}
// Interleaving mode
else {
// Pointer to segment's list of 8x8 blocks and their count
const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
// Encode all blocks
for(int block_count = segment->block_count; block_count--;) {
// Get pointer to next block input data and info about its color type
const uint64_t packed_block_info = *(packed_block_info_ptr++);
// Get coder parameters
const int last_dc_idx = packed_block_info & 0x7f;
// Get offset to right part of huffman table
const unsigned int dc_huffman_table_offset = d_component[last_dc_idx].dc_huff_idx * 0x20000;
const unsigned int ac_huffman_table_offset = d_component[last_dc_idx].ac_huff_idx * 0x20000 + 0x10000;
// Source data pointer
int16_t* block = d_data_quantized + (packed_block_info >> 8);
// Encode 8x8 block
gpujpeg_huffman_gpu_decoder_decode_block(dc[last_dc_idx], block, dc_huffman_table_offset, ac_huffman_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
}
// // Encode MCUs in segment
// for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
//
//
//
//
//
//
//
//
// //assert(segment->scan_index == 0);
// for ( int comp = 0; comp < comp_count; comp++ ) {
// struct gpujpeg_component* component = &d_component[comp];
//
// // Prepare mcu indexes
// int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
// int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
// // Compute base data index
// int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
//
// // For all vertical 8x8 blocks
// for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
// // Compute base row data index
// int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
// // For all horizontal 8x8 blocks
// for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
// // Compute 8x8 block data index
// int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
//
// // Get component data for MCU
// int16_t* block = &component->d_data_quantized[data_index];
//
// // Get coder parameters
// int & component_dc = dc[comp];
//
// // Get huffman tables offset
// const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
//
// // Encode 8x8 block
// gpujpeg_huffman_gpu_decoder_decode_block(component_dc, block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
// }
// }
// }
// }
}
}
/**
* Setup of one Huffman table entry for fast decoding.
* @param bits bits to extract one codeword from (first bit is bit #15, then #14, ... last is #0)
* @param d_table_src source (slow-decoding) table pointer
* @param d_table_dest destination (fast-decoding) table pointer
*/
__device__ void
gpujpeg_huffman_gpu_decoder_table_setup(
const int bits,
const struct gpujpeg_table_huffman_decoder* const d_table_src,
const int table_idx
) {
// Decode one codeword from given bits to get following:
// - minimal number of bits actually needed to decode the codeword (up to 16 bits, 0 for invalid ones)
// - category ID represented by the codeword, consisting from:
// - number of run-length-coded preceding zeros (up to 16, or 63 for both special end-of block symbol or invalid codewords)
// - bit-size of the actual value of coefficient (up to 16, 0 for invalid ones)
int code_nbits = 1, category_id = 0;
// First, decode codeword length (This is per Figure F.16 in the JPEG spec.)
int code_value = bits >> 15; // only single bit initially
while ( code_value > d_table_src->maxcode[code_nbits] ) {
code_value = bits >> (16 - ++code_nbits); // not enough to decide => try more bits
}
// With garbage input we may reach the sentinel value l = 17.
if ( code_nbits > 16 ) {
code_nbits = 0;
// category ID remains 0 for invalid symbols from garbage input
} else {
category_id = d_table_src->huffval[d_table_src->valptr[code_nbits] + code_value - d_table_src->mincode[code_nbits]];
}
// decompose category number into 1 + number of run-length coded zeros and length of the value
// (special category #0 contains all invalid codes and special end-of-block code -- all of those codes
// should terminate block decoding => use 64 run-length zeros and 0 value bits for such symbols)
const int value_nbits = 0xF & category_id;
const int rle_zero_count = category_id ? min(1 + (category_id >> 4), 64) : 64;
// save all the info into the right place in the destination table
const int packed_info = (rle_zero_count << 9) + (code_nbits << 4) + value_nbits;
gpujpeg_huffman_gpu_decoder_tables_full[(table_idx << 16) + bits] = packed_info;
// some threads also save entries into the quick table
const int dest_idx_quick = bits >> (16 - QUICK_CHECK_BITS);
if(bits == (dest_idx_quick << (16 - QUICK_CHECK_BITS))) {
// save info also into the quick table if number of required bits is less than quick
// check bit count, otherwise put 0 there to indicate that full table lookup consultation is needed
gpujpeg_huffman_gpu_decoder_tables_quick[(table_idx << QUICK_CHECK_BITS) + dest_idx_quick] = code_nbits <= QUICK_CHECK_BITS ? packed_info : 0;
}
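// Example (hypothetical symbol): category_id = 0x23 means "2 run-length zeros, 3 value bits",
// so rle_zero_count = min(1 + 2, 64) = 3 and value_nbits = 3; with a 5-bit codeword the
// packed entry becomes (3 << 9) + (5 << 4) + 3 = 1619.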
}
/**
* Huffman decoder table setup kernel
* (Based on the original table, this kernel prepares another table, which is more suitable for fast decoding.)
*/
__global__ void
gpujpeg_huffman_decoder_table_kernel(
const struct gpujpeg_table_huffman_decoder* const d_table_y_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_y_ac,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_ac
) {
// Each thread uses all 4 Huffman tables to "decode" one symbol from its unique 16bits.
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_dc, 0);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_ac, 1);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_dc, 2);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_ac, 3);
}
/* Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_init()
{
// Copy natural order to constant device memory
hipMemcpyToSymbol(
gpujpeg_huffman_gpu_decoder_order_natural,
gpujpeg_order_natural,
GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
0,
hipMemcpyHostToDevice
);
gpujpeg_cuda_check_error("Huffman decoder init", return -1);
return 0;
}
/* Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_decode(struct gpujpeg_decoder* decoder)
{
// Get coder
struct gpujpeg_coder* coder = &decoder->coder;
assert(coder->param.restart_interval > 0);
int comp_count = 1;
if (coder->param.interleaved == 1) {
comp_count = coder->param_image.comp_count;
}
assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
// Number of decoder kernel threads per each threadblock
enum { THREADS_PER_TBLOCK = 192 };
// Configure more Shared memory for both kernels
hipFuncSetCacheConfig(gpujpeg_huffman_decoder_table_kernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>, hipFuncCachePreferShared);
hipFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>, hipFuncCachePreferShared);
// Setup GPU tables (one thread for each of 65536 entries)
hipLaunchKernelGGL(( gpujpeg_huffman_decoder_table_kernel), dim3(256), dim3(256), 0, *(decoder->stream),
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]
);
gpujpeg_cuda_check_error("Huffman decoder table setup failed", return -1);
// Get pointer to quick decoding table in device memory
void * d_src_ptr = 0;
hipGetSymbolAddress(&d_src_ptr, gpujpeg_huffman_gpu_decoder_tables_quick);
gpujpeg_cuda_check_error("Huffman decoder table address lookup failed", return -1);
// Copy quick decoding table into constant memory
hipMemcpyToSymbolAsync(
gpujpeg_huffman_gpu_decoder_tables_quick_const,
d_src_ptr,
sizeof(*gpujpeg_huffman_gpu_decoder_tables_quick) * QUICK_TABLE_ITEMS,
0,
hipMemcpyDeviceToDevice,
*(decoder->stream)
);
gpujpeg_cuda_check_error("Huffman decoder table copy failed", return -1);
for (int comp = 0; comp < coder->param_image.comp_count; comp++) {
coder->component[comp].dc_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_DC];
coder->component[comp].ac_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_AC];
}
// Copy updated components to device memory
hipMemcpyAsync(coder->d_component, coder->component, coder->param_image.comp_count * sizeof(struct gpujpeg_component), hipMemcpyHostToDevice, *(decoder->stream));
gpujpeg_cuda_check_error("Coder component copy", return 0);
// Run decoding kernel
dim3 thread(THREADS_PER_TBLOCK);
dim3 grid(gpujpeg_div_and_round_up(decoder->segment_count, THREADS_PER_TBLOCK));
if(comp_count == 1) {
hipLaunchKernelGGL(( gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>), dim3(grid), dim3(thread), 0, *(decoder->stream),
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
} else {
hipLaunchKernelGGL(( gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>), dim3(grid), dim3(thread), 0, *(decoder->stream),
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
}
gpujpeg_cuda_check_error("Huffman decoding failed", return -1);
return 0;
}
/* vi: set expandtab sw=4 : */
|
cf4d403ac5716f4e038cb6269c5fa0853eeb856e.cu
|
/**
* @file
* Copyright (c) 2011-2020, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_decoder.h"
#include <libgpujpeg/gpujpeg_util.h>
/**
* Entry of pre-built Huffman fast-decoding table.
*/
struct gpujpeg_table_huffman_decoder_entry {
int value_nbits;
};
/**
* 4 pre-built tables for faster Huffman decoding (codewords up-to 16 bit length):
* - 0x00000 to 0x0ffff: luminance DC table
* - 0x10000 to 0x1ffff: luminance AC table
* - 0x20000 to 0x2ffff: chrominance DC table
* - 0x30000 to 0x3ffff: chrominance AC table
*
* Each entry consists of:
* - Number of bits of code corresponding to this entry (0 - 16, both inclusive) - bits 4 to 8
* - Number of run-length coded zeros before currently decoded coefficient + 1 (1 - 64, both inclusive) - bits 9 to 15
* - Number of bits representing the value of currently decoded coefficient (0 - 15, both inclusive) - bits 0 to 3
* @code
* bit #: 15 9 8 4 3 0
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* value: | RLE zero count | code bit size | value bit size|
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* @endcode
*/
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_full[4 * (1 << 16)];
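/*
 * Worked example of the packed entry format (editor's illustration, not part of
 * the original source): a codeword that is 5 bits long and encodes category 0x23
 * (2 run-length zeros, 3 value bits) is stored as
 *   packed = ((2 + 1) << 9) | (5 << 4) | 3 = 0x0653,
 * and is later unpacked in gpujpeg_huffman_gpu_decoder_get_coefficient() as
 *   code_nbits  = (packed >> 4) & 0x1F;  // 5
 *   rle_skip    =  packed >> 9;          // 3 (zeros + 1)
 *   value_nbits =  packed & 0xF;         // 3
 */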
/** Number of code bits to be checked first (with high chance for the code to fit into this number of bits). */
#define QUICK_CHECK_BITS 10
#define QUICK_TABLE_ITEMS (4 * (1 << QUICK_CHECK_BITS))
// TODO: try to tweak QUICK table size and memory space
/** Table with same format as the full table, except that all-zero-entry means that the full table should be consulted. */
__device__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick[QUICK_TABLE_ITEMS];
/** Same table as above, but copied into constant memory */
__constant__ uint16_t gpujpeg_huffman_gpu_decoder_tables_quick_const[QUICK_TABLE_ITEMS];
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_decoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
// /**
// * Fill more bit to current get buffer
// *
// * @param get_bits
// * @param get_buff
// * @param data
// * @param data_size
// * @return void
// */
// __device__ inline void
// gpujpeg_huffman_gpu_decoder_decode_fill_bit_buffer(int & get_bits, int & get_buff, uint8_t* & data, int & data_size)
// {
// while ( get_bits < 25 ) {
// //Are there some data?
// if( data_size > 0 ) {
// // Attempt to read a byte
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// unsigned char uc = *data++;
// data_size--;
//
// // If it's 0xFF, check and discard stuffed zero byte
// if ( uc == 0xFF ) {
// while ( uc == 0xFF ) {
// //printf("read byte %X 0x%X\n", (int)data, (unsigned char)*data);
// uc = *data++;
// data_size--;
// }
//
// if ( uc == 0 ) {
// // Found FF/00, which represents an FF data byte
// uc = 0xFF;
// } else {
// // There should be enough bits still left in the data segment;
// // if so, just break out of the outer while loop.
// //if (m_nGetBits >= nbits)
// if ( get_bits >= 0 )
// break;
// }
// }
//
// get_buff = (get_buff << 8) | ((int) uc);
// get_bits += 8;
// }
// else
// break;
// }
// }
/**
* Loads at least specified number of bits into the register
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_load_bits(
const unsigned int required_bit_count, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx
) {
// Add bytes until have enough
while(r_bit_count < required_bit_count) {
// Load byte value and possibly skip next stuffed byte if loaded byte's value is 0xFF
const uint8_t byte_value = ((const uint8_t*)s_byte)[s_byte_idx++];
if((uint8_t)0xFF == byte_value) {
s_byte_idx++;
}
// Add newly loaded byte to the buffer, updating bit count
r_bit = (r_bit << 8) + byte_value;
r_bit_count += 8;
}
}
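/*
 * Note (added for clarity): inside an entropy-coded segment (restart markers are
 * handled outside this kernel) the byte following 0xFF is assumed to be a stuffed
 * 0x00, so the loop above keeps the 0xFF in the bit buffer and skips the byte after
 * it. For example, the input bytes FF 00 D5 contribute the bytes 0xFF 0xD5.
 */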
/**
* Get bits
*
* @param nbits Number of bits to get
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return bits
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_get_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// update remaining bit count
r_bit_count -= nbits;
// return bits
return (r_bit >> r_bit_count) & ((1 << nbits) - 1);
}
/**
* Gets bits without removing them from the buffer.
*/
__device__ inline unsigned int
gpujpeg_huffman_gpu_decoder_peek_bits(
const unsigned int nbits, unsigned int & r_bit, unsigned int & r_bit_count,
uint4 * const s_byte, unsigned int & s_byte_idx)
{
// load bits into the register if haven't got enough
gpujpeg_huffman_gpu_decoder_load_bits(nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return bits
return (r_bit >> (r_bit_count - nbits)) & ((1 << nbits) - 1);
}
/**
* Removes some bits from the buffer (assumes that they are there)
*/
__device__ inline void
gpujpeg_huffman_gpu_decoder_discard_bits(const unsigned int nb, unsigned int, unsigned int & r_bit_count) {
r_bit_count -= nb;
}
/**
* Special Huffman decode:
* (1) For codes with length > 8
* (2) For codes with length < 8 while data is finished
*
* @param table
* @param min_bits
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_special_decode(
const struct gpujpeg_table_huffman_decoder* const table, int min_bits, unsigned int & r_bit,
unsigned int & r_bit_count, uint4 * const s_byte, unsigned int & s_byte_idx)
{
// HUFF_DECODE has determined that the code is at least min_bits
// bits long, so fetch that many bits in one swoop.
int code = gpujpeg_huffman_gpu_decoder_get_bits(min_bits, r_bit, r_bit_count, s_byte, s_byte_idx);
// Collect the rest of the Huffman code one bit at a time.
// This is per Figure F.16 in the JPEG spec.
int l = min_bits;
while ( code > table->maxcode[l] ) {
code <<= 1;
code |= gpujpeg_huffman_gpu_decoder_get_bits(1, r_bit, r_bit_count, s_byte, s_byte_idx);
l++;
}
// With garbage input we may reach the sentinel value l = 17.
if ( l > 16 ) {
// Fake a zero as the safest result
return 0;
}
return table->huffval[table->valptr[l] + (int)(code - table->mincode[l])];
}
/**
* To find dc or ac value according to code and its bit length s
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_value_from_category(int nbits, int code)
{
// TODO: try to replace with __constant__ table lookup
return code < ((1 << nbits) >> 1) ? (code + ((-1) << nbits) + 1) : code;
// // Method 1:
// // On some machines, a shift and add will be faster than a table lookup.
// // #define HUFF_EXTEND(x,s) \
// // ((x)< (1<<((s)-1)) ? (x) + (((-1)<<(s)) + 1) : (x))
//
// // Method 2: Table lookup
// // If (offset < half[category]), then value is below zero
// // Otherwise, value is above zero, and just the offset
// // entry n is 2**(n-1)
// const int half[16] = {
// 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040,
// 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000
// };
//
// //start[i] is the starting value in this category; surely it is below zero
// // entry n is (-1 << n) + 1
// const int start[16] = {
// 0, ((-1)<<1) + 1, ((-1)<<2) + 1, ((-1)<<3) + 1, ((-1)<<4) + 1,
// ((-1)<<5) + 1, ((-1)<<6) + 1, ((-1)<<7) + 1, ((-1)<<8) + 1,
// ((-1)<<9) + 1, ((-1)<<10) + 1, ((-1)<<11) + 1, ((-1)<<12) + 1,
// ((-1)<<13) + 1, ((-1)<<14) + 1, ((-1)<<15) + 1
// };
//
// return (code < half[nbits]) ? (code + start[nbits]) : code;
}
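/*
 * Worked example (editor's illustration): for nbits = 3 the received value codes
 * 0..7 map to coefficients -7,-6,-5,-4, 4, 5, 6, 7 — codes below 2^(nbits-1) are
 * negative values counted up from -(2^nbits - 1), the remaining codes are the value
 * itself (the EXTEND procedure of ITU-T T.81, Figure F.12).
 */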
/**
* Decodes next coefficient, updating its output index
*
* @param table
* @param get_bits
* @param get_buff
* @param data
* @param data_size
* @return int
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_get_coefficient(
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const unsigned int table_offset, unsigned int & coefficient_idx)
{
// Peek next 16 bits and use them as an index into decoder table to find all the info.
const unsigned int table_idx = table_offset + gpujpeg_huffman_gpu_decoder_peek_bits(16, r_bit, r_bit_count, s_byte, s_byte_idx);
// Try the quick table first (use the full table only if not succeeded with the quick table)
unsigned int packed_info = gpujpeg_huffman_gpu_decoder_tables_quick_const[table_idx >> (16 - QUICK_CHECK_BITS)];
if(0 == packed_info) {
packed_info = gpujpeg_huffman_gpu_decoder_tables_full[table_idx];
}
// remove the right number of bits from the bit buffer
gpujpeg_huffman_gpu_decoder_discard_bits((packed_info >> 4) & 0x1F, r_bit, r_bit_count);
// update coefficient index by skipping run-length encoded zeros
coefficient_idx += packed_info >> 9;
// read coefficient bits and decode the coefficient from them
const unsigned int value_nbits = packed_info & 0xF;
const unsigned int value_code = gpujpeg_huffman_gpu_decoder_get_bits(value_nbits, r_bit, r_bit_count, s_byte, s_byte_idx);
// return decoded coefficient
return gpujpeg_huffman_gpu_decoder_value_from_category(value_nbits, value_code);
}
/**
* Decode one 8x8 block
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ inline int
gpujpeg_huffman_gpu_decoder_decode_block(
int & dc, int16_t* const data_output, const unsigned int dc_table_offset, const unsigned int ac_table_offset,
unsigned int & r_bit, unsigned int & r_bit_count, uint4* const s_byte,
unsigned int & s_byte_idx, const uint4* & d_byte, unsigned int & d_byte_chunk_count)
{
// TODO: try unified decoding of DC/AC coefficients
// Index of next coefficient to be decoded (in ZIG-ZAG order)
unsigned int coefficient_idx = 0;
// Section F.2.2.1: decode the DC coefficient difference
// Get the coefficient value (using DC coding table)
int dc_coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, dc_table_offset, coefficient_idx);
// Convert DC difference to actual value, update last_dc_val
dc = dc_coefficient_value += dc;
// Output the DC coefficient (assumes gpujpeg_natural_order[0] = 0)
// TODO: try to skip saving of zero coefficients
data_output[0] = dc_coefficient_value;
// TODO: error check: coefficient_idx must still be 0 in valid codestreams
coefficient_idx = 1;
// Section F.2.2.2: decode the AC coefficients
// Since zeroes are skipped, output area must be cleared beforehand
do {
// Possibly load more bytes into shared buffer from global memory
if(s_byte_idx >= 16) {
// Move remaining bytes to begin and update index of next byte
s_byte[0] = s_byte[1];
s_byte_idx -= 16;
// Load another byte chunk from global memory only if there is one
if(d_byte_chunk_count) {
s_byte[1] = *(d_byte++);
d_byte_chunk_count--;
}
}
// decode next coefficient, updating its destination index
const int coefficient_value = gpujpeg_huffman_gpu_decoder_get_coefficient(r_bit, r_bit_count, s_byte, s_byte_idx, ac_table_offset, coefficient_idx);
// stop with this block once all coefficients have been read
if(coefficient_idx > 64) {
break;
}
// save the coefficient TODO: try to omit saving 0 coefficients
data_output[gpujpeg_huffman_gpu_decoder_order_natural[coefficient_idx - 1]] = coefficient_value;
} while(coefficient_idx < 64);
return 0;
}
/**
* Huffman decoder kernel
*
* @return void
*/
template <bool SINGLE_COMP, int THREADS_PER_TBLOCK>
__global__ void
#if __CUDA_ARCH__ < 200
__launch_bounds__(THREADS_PER_TBLOCK, 2)
#else
__launch_bounds__(THREADS_PER_TBLOCK, 6)
#endif
gpujpeg_huffman_decoder_decode_kernel(
struct gpujpeg_component* d_component,
struct gpujpeg_segment* d_segment,
int comp_count,
int segment_count,
uint8_t* d_data_compressed,
const uint64_t* d_block_list,
int16_t* d_data_quantized
) {
int segment_index = blockIdx.x * THREADS_PER_TBLOCK + threadIdx.x;
if ( segment_index >= segment_count )
return;
struct gpujpeg_segment* segment = &d_segment[segment_index];
// Byte buffers in shared memory
__shared__ uint4 s_byte_all[2 * THREADS_PER_TBLOCK]; // 32 bytes per thread
uint4 * const s_byte = s_byte_all + 2 * threadIdx.x;
// Last DC coefficient values TODO: try to move into shared memory
int dc[GPUJPEG_MAX_COMPONENT_COUNT];
for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
dc[comp] = 0;
// Get aligned compressed data chunk pointer and load first 2 chunks of the data
const unsigned int d_byte_begin_idx = segment->data_compressed_index;
const unsigned int d_byte_begin_idx_aligned = d_byte_begin_idx & ~15; // loading 16byte chunks
const uint4* d_byte = (uint4*)(d_data_compressed + d_byte_begin_idx_aligned);
// Get number of remaining global memory byte chunks (not to read bytes out of buffer)
const unsigned int d_byte_end_idx_aligned = (d_byte_begin_idx + segment->data_compressed_size + 15) & ~15;
unsigned int d_byte_chunk_count = (d_byte_end_idx_aligned - d_byte_begin_idx_aligned) / 16;
// Load first 2 chunks of compressed data into the shared memory buffer and remember index of first code byte (skipping bytes read due to alignment)
s_byte[0] = d_byte[0];
s_byte[1] = d_byte[1];
d_byte += 2;
d_byte_chunk_count = max(d_byte_chunk_count, 2) - 2;
unsigned int s_byte_idx = d_byte_begin_idx - d_byte_begin_idx_aligned;
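// Example: if data_compressed_index == 37, the aligned load starts at byte 32,
// so s_byte_idx == 5 and the first 5 bytes of s_byte[0] are alignment padding
// that the bit reader never consumes.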
// bits loaded into the register and their count
unsigned int r_bit_count = 0;
unsigned int r_bit = 0; // LSB-aligned
// Non-interleaving mode
if ( SINGLE_COMP ) {
// Get component for current scan
const struct gpujpeg_component* const component = d_component + segment->scan_index;
// Get huffman tables offset
const unsigned int dc_table_offset = component->dc_huff_idx * 0x20000;
const unsigned int ac_table_offset = component->ac_huff_idx * 0x20000 + 0x10000;
// Size of MCUs in this segment's component
const int component_mcu_size = component->mcu_size;
// Pointer to first MCU's output block
int16_t* block = component->d_data_quantized + segment->scan_segment_index * component->segment_mcu_count * component_mcu_size;
// Decode MCUs in segment
for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
// Decode 8x8 block
if ( gpujpeg_huffman_gpu_decoder_decode_block(dc[0], block, dc_table_offset, ac_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count) != 0 )
break;
// advance to next block
block += component_mcu_size;
}
}
// Interleaving mode
else {
// Pointer to segment's list of 8x8 blocks and their count
const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
// Decode all blocks
for(int block_count = segment->block_count; block_count--;) {
// Get pointer to next block input data and info about its color type
const uint64_t packed_block_info = *(packed_block_info_ptr++);
// Get coder parameters
const int last_dc_idx = packed_block_info & 0x7f;
// Get offset to right part of huffman table
const unsigned int dc_huffman_table_offset = d_component[last_dc_idx].dc_huff_idx * 0x20000;
const unsigned int ac_huffman_table_offset = d_component[last_dc_idx].ac_huff_idx * 0x20000 + 0x10000;
// Destination pointer for decoded coefficients
int16_t* block = d_data_quantized + (packed_block_info >> 8);
// Decode 8x8 block
gpujpeg_huffman_gpu_decoder_decode_block(dc[last_dc_idx], block, dc_huffman_table_offset, ac_huffman_table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
}
// // Encode MCUs in segment
// for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
//
//
//
//
//
//
//
//
// //assert(segment->scan_index == 0);
// for ( int comp = 0; comp < comp_count; comp++ ) {
// struct gpujpeg_component* component = &d_component[comp];
//
// // Prepare mcu indexes
// int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
// int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
// // Compute base data index
// int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
//
// // For all vertical 8x8 blocks
// for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
// // Compute base row data index
// int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
// // For all horizontal 8x8 blocks
// for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
// // Compute 8x8 block data index
// int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
//
// // Get component data for MCU
// int16_t* block = &component->d_data_quantized[data_index];
//
// // Get coder parameters
// int & component_dc = dc[comp];
//
// // Get huffman tables offset
// const unsigned int table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0x00000 : 0x20000;
//
// // Encode 8x8 block
// gpujpeg_huffman_gpu_decoder_decode_block(component_dc, block, table_offset, r_bit, r_bit_count, s_byte, s_byte_idx, d_byte, d_byte_chunk_count);
// }
// }
// }
// }
}
}
/**
* Setup of one Huffman table entry for fast decoding.
* @param bits bits to extract one codeword from (first bit is bit #15, then #14, ... last is #0)
* @param d_table_src source (slow-decoding) table pointer
* @param d_table_dest destination (fast-decoding) table pointer
*/
__device__ void
gpujpeg_huffman_gpu_decoder_table_setup(
const int bits,
const struct gpujpeg_table_huffman_decoder* const d_table_src,
const int table_idx
) {
// Decode one codeword from given bits to get following:
// - minimal number of bits actually needed to decode the codeword (up to 16 bits, 0 for invalid ones)
// - category ID represented by the codeword, consisting from:
// - number of run-length-coded preceding zeros (up to 16, or 63 for both the special end-of-block symbol and invalid codewords)
// - bit-size of the actual value of coefficient (up to 16, 0 for invalid ones)
int code_nbits = 1, category_id = 0;
// First, decode codeword length (This is per Figure F.16 in the JPEG spec.)
int code_value = bits >> 15; // only single bit initially
while ( code_value > d_table_src->maxcode[code_nbits] ) {
code_value = bits >> (16 - ++code_nbits); // not enough to decide => try more bits
}
// With garbage input we may reach the sentinel value l = 17.
if ( code_nbits > 16 ) {
code_nbits = 0;
// category ID remains 0 for invalid symbols from garbage input
} else {
category_id = d_table_src->huffval[d_table_src->valptr[code_nbits] + code_value - d_table_src->mincode[code_nbits]];
}
// decompose category number into 1 + number of run-length coded zeros and length of the value
// (special category #0 contains all invalid codes and special end-of-block code -- all of those codes
// should terminate block decoding => use 64 run-length zeros and 0 value bits for such symbols)
const int value_nbits = 0xF & category_id;
const int rle_zero_count = category_id ? min(1 + (category_id >> 4), 64) : 64;
// save all the info into the right place in the destination table
const int packed_info = (rle_zero_count << 9) + (code_nbits << 4) + value_nbits;
gpujpeg_huffman_gpu_decoder_tables_full[(table_idx << 16) + bits] = packed_info;
// some threads also save entries into the quick table
const int dest_idx_quick = bits >> (16 - QUICK_CHECK_BITS);
if(bits == (dest_idx_quick << (16 - QUICK_CHECK_BITS))) {
// save info also into the quick table if number of required bits is less than quick
// check bit count, otherwise put 0 there to indicate that full table lookup consultation is needed
gpujpeg_huffman_gpu_decoder_tables_quick[(table_idx << QUICK_CHECK_BITS) + dest_idx_quick] = code_nbits <= QUICK_CHECK_BITS ? packed_info : 0;
}
}
/**
* Huffman decoder table setup kernel
* (Based on the original table, this kernel prepares another table, which is more suitable for fast decoding.)
*/
__global__ void
gpujpeg_huffman_decoder_table_kernel(
const struct gpujpeg_table_huffman_decoder* const d_table_y_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_y_ac,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_dc,
const struct gpujpeg_table_huffman_decoder* const d_table_cbcr_ac
) {
// Each thread uses all 4 Huffman tables to "decode" one symbol from its unique 16bits.
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_dc, 0);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_y_ac, 1);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_dc, 2);
gpujpeg_huffman_gpu_decoder_table_setup(idx, d_table_cbcr_ac, 3);
}
/* Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_init()
{
// Copy natural order to constant device memory
cudaMemcpyToSymbol(
gpujpeg_huffman_gpu_decoder_order_natural,
gpujpeg_order_natural,
GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
0,
cudaMemcpyHostToDevice
);
gpujpeg_cuda_check_error("Huffman decoder init", return -1);
return 0;
}
/* Documented at declaration */
int
gpujpeg_huffman_gpu_decoder_decode(struct gpujpeg_decoder* decoder)
{
// Get coder
struct gpujpeg_coder* coder = &decoder->coder;
assert(coder->param.restart_interval > 0);
int comp_count = 1;
if (coder->param.interleaved == 1) {
comp_count = coder->param_image.comp_count;
}
assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
// Number of decoder kernel threads per threadblock
enum { THREADS_PER_TBLOCK = 192 };
// Configure more Shared memory for both kernels
cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_table_kernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK>, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK>, cudaFuncCachePreferShared);
// Setup GPU tables (one thread for each of 65536 entries)
gpujpeg_huffman_decoder_table_kernel<<<256, 256, 0, *(decoder->stream)>>>(
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC],
decoder->d_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]
);
gpujpeg_cuda_check_error("Huffman decoder table setup failed", return -1);
// Get pointer to quick decoding table in device memory
void * d_src_ptr = 0;
cudaGetSymbolAddress(&d_src_ptr, gpujpeg_huffman_gpu_decoder_tables_quick);
gpujpeg_cuda_check_error("Huffman decoder table address lookup failed", return -1);
// Copy quick decoding table into constant memory
cudaMemcpyToSymbolAsync(
gpujpeg_huffman_gpu_decoder_tables_quick_const,
d_src_ptr,
sizeof(*gpujpeg_huffman_gpu_decoder_tables_quick) * QUICK_TABLE_ITEMS,
0,
cudaMemcpyDeviceToDevice,
*(decoder->stream)
);
gpujpeg_cuda_check_error("Huffman decoder table copy failed", return -1);
for (int comp = 0; comp < coder->param_image.comp_count; comp++) {
coder->component[comp].dc_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_DC];
coder->component[comp].ac_huff_idx = decoder->comp_table_huffman_map[comp][GPUJPEG_HUFFMAN_AC];
}
// Copy updated components to device memory
cudaMemcpyAsync(coder->d_component, coder->component, coder->param_image.comp_count * sizeof(struct gpujpeg_component), cudaMemcpyHostToDevice, *(decoder->stream));
gpujpeg_cuda_check_error("Coder component copy", return 0);
// Run decoding kernel
dim3 thread(THREADS_PER_TBLOCK);
dim3 grid(gpujpeg_div_and_round_up(decoder->segment_count, THREADS_PER_TBLOCK));
if(comp_count == 1) {
gpujpeg_huffman_decoder_decode_kernel<true, THREADS_PER_TBLOCK><<<grid, thread, 0, *(decoder->stream)>>>(
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
} else {
gpujpeg_huffman_decoder_decode_kernel<false, THREADS_PER_TBLOCK><<<grid, thread, 0, *(decoder->stream)>>>(
coder->d_component,
coder->d_segment,
comp_count,
decoder->segment_count,
coder->d_data_compressed,
coder->d_block_list,
coder->d_data_quantized
);
}
gpujpeg_cuda_check_error("Huffman decoding failed", return -1);
return 0;
}
/* vi: set expandtab sw=4 : */
|
f422b6f0373a308531253dc43d8b31c4c1de0df2.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorMCMMonoGPU.cuh"
#include "IntegratorMCMMonoImplicitGPU.cuh"
#include "IntegratorMCMMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
namespace mcm
{
namespace detail
{
//! MCM kernels for ShapeSpheropolyhedron
template hipError_t gpu_mcm_free_volume<ShapeSpheropolyhedron >(const mcm_free_volume_args_t &args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_mcm_update<ShapeSpheropolyhedron >(const mcm_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_mcm_implicit_count_overlaps<ShapeSpheropolyhedron >(const mcm_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_mcm_implicit_accept_reject<ShapeSpheropolyhedron >(const mcm_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_mcm_insert_depletants_queue<ShapeSpheropolyhedron >(const mcm_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_mcm_implicit_accept_reject_new<ShapeSpheropolyhedron >(const mcm_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace mcm
|
f422b6f0373a308531253dc43d8b31c4c1de0df2.cu
|
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorMCMMonoGPU.cuh"
#include "IntegratorMCMMonoImplicitGPU.cuh"
#include "IntegratorMCMMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
namespace mcm
{
namespace detail
{
//! MCM kernels for ShapeSpheropolyhedron
template cudaError_t gpu_mcm_free_volume<ShapeSpheropolyhedron >(const mcm_free_volume_args_t &args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_mcm_update<ShapeSpheropolyhedron >(const mcm_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_mcm_implicit_count_overlaps<ShapeSpheropolyhedron >(const mcm_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_mcm_implicit_accept_reject<ShapeSpheropolyhedron >(const mcm_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_mcm_insert_depletants_queue<ShapeSpheropolyhedron >(const mcm_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_mcm_implicit_accept_reject_new<ShapeSpheropolyhedron >(const mcm_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace mcm
|
bc8243d1dd505e5db1edd0fb5612d29e610b34d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetf2_nopiv_kernels.cu, normal z -> s, Mon Jun 25 18:24:16 2018
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
extern __shared__ float zdata[];
template<int N>
__device__ void
sgetf2_nopiv_device(int m, float* dA, int ldda, magma_int_t *info, const int tx, float* sx, int gbstep)
{
float rA[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
int linfo = 0;
float abs;
// read
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
#pragma unroll
for(int i = 0; i < N; i++){
if(tx == i){
#pragma unroll
for(int j = 0; j < N; j++)
sx[j] = rA[j];
}
__syncthreads();
abs = fabs(MAGMA_S_REAL( sx[i] )) + fabs(MAGMA_S_IMAG( sx[i] ));
linfo = ( abs == MAGMA_D_ZERO ) ? min(linfo,gbstep+i+1):0;
reg = (linfo == 0 ) ? MAGMA_S_DIV(MAGMA_S_ONE, sx[i] ) : MAGMA_S_ONE;
// scal and ger
if( tx > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
__syncthreads();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + tx ] = rA[i];
}
}
/******************************************************************************/
extern __shared__ float zdata[];
template<int N, int NPOW2>
__global__ void
sgetf2_nopiv_batched_kernel( int m, float** dA_array, int ai, int aj, int ldda,
magma_int_t* info_array, int gbstep, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount)return;
float* dA = dA_array[batchid] + aj * ldda + ai;
magma_int_t* info = &info_array[batchid];
float* sx = (float*)zdata;
sx += ty * NPOW2;
sgetf2_nopiv_device<N>(m, dA, ldda, info, tx, sx, gbstep);
}
/***************************************************************************//**
Purpose
-------
magma_sgetf2_nopiv_internal_batched computes the LU factorization of an M-by-N panel A
without pivoting (no row interchanges are performed).
This routine can deal only with panels of width up to 32 (and, for widths above 16, height up to 512).
The factorization has the form
A = L * U
where L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
extern "C" magma_int_t
magma_sgetf2_nopiv_internal_batched(
magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
magma_int_t* info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue )
{
#define dAarray(i,j) dA_array, i, j
magma_int_t arginfo = 0;
if (m < 0) {
arginfo = -1;
} else if (n < 0 || n > 32 || (m > 512 && n > 16) ) {
arginfo = -2;
} else if (ai < 0) {
arginfo = -4;
} else if (aj < 0) {
arginfo = -5;
} else if (ldda < max(1,m)) {
arginfo = -6;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magma_int_t m1 = (m > MAX_NTHREADS) ? MAX_NTHREADS : m;
magma_int_t m2 = m - m1;
const magma_int_t ntcol = (m1 > 32) ? 1 : (2 * (32/m1));
magma_int_t shmem = ntcol * magma_ceilpow2(n) * sizeof(float);
magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 threads(m1, ntcol, 1);
dim3 grid(gridx, 1, 1);
switch(n){
case 1:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 2:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 3:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 4:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 5:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 6:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 7:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 8:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 8, magma_ceilpow2( 8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 9:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 10:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 11:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 12:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 13:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 14:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 15:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 16:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 17:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 18:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 19:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 20:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 21:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 22:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 23:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 24:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 25:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 26:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 27:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 28:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 29:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 30:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 31:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 32:hipLaunchKernelGGL(( sgetf2_nopiv_batched_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
default: printf("error: panel width %lld is not supported\n", (long long) n);
}
if(m2 > 0){
magmablas_strsm_recursive_batched(
MagmaRight, MagmaUpper, MagmaNoTrans, MagmaNonUnit,
m2, n, MAGMA_S_ONE,
dAarray(ai ,aj), ldda,
dAarray(ai+m1,aj), ldda, batchCount, queue );
}
#undef dAarray
return arginfo;
}
|
bc8243d1dd505e5db1edd0fb5612d29e610b34d9.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetf2_nopiv_kernels.cu, normal z -> s, Mon Jun 25 18:24:16 2018
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. for communication.
// It also uses lazy swap.
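// Added note: each thread owns one row of the N-wide panel in registers (rA[]).
// At elimination step i, thread i publishes its row through shared memory (sx[]),
// and the threads below the diagonal scale their pivot-column entry by 1/sx[i] and
// update the trailing columns of their own row. No row interchanges are performed.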
extern __shared__ float zdata[];
template<int N>
__device__ void
sgetf2_nopiv_device(int m, float* dA, int ldda, magma_int_t *info, const int tx, float* sx, int gbstep)
{
float rA[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
int linfo = 0;
float abs;
// read
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
#pragma unroll
for(int i = 0; i < N; i++){
if(tx == i){
#pragma unroll
for(int j = 0; j < N; j++)
sx[j] = rA[j];
}
__syncthreads();
abs = fabs(MAGMA_S_REAL( sx[i] )) + fabs(MAGMA_S_IMAG( sx[i] ));
linfo = ( abs == MAGMA_D_ZERO ) ? min(linfo,gbstep+i+1):0;
reg = (linfo == 0 ) ? MAGMA_S_DIV(MAGMA_S_ONE, sx[i] ) : MAGMA_S_ONE;
// scal and ger
if( tx > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sx[j];
}
}
__syncthreads();
}
if(tx == 0){
(*info) = (magma_int_t)( linfo );
}
// write
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + tx ] = rA[i];
}
}
/******************************************************************************/
extern __shared__ float zdata[];
template<int N, int NPOW2>
__global__ void
sgetf2_nopiv_batched_kernel( int m, float** dA_array, int ai, int aj, int ldda,
magma_int_t* info_array, int gbstep, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
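// Each thread block handles blockDim.y (= ntcol) matrices: threadIdx.y selects the
// matrix within the block and threadIdx.x indexes a row of that matrix.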
if(batchid >= batchCount)return;
float* dA = dA_array[batchid] + aj * ldda + ai;
magma_int_t* info = &info_array[batchid];
float* sx = (float*)zdata;
sx += ty * NPOW2;
sgetf2_nopiv_device<N>(m, dA, ldda, info, tx, sx, gbstep);
}
/***************************************************************************//**
Purpose
-------
magma_sgetf2_nopiv_internal_batched computes the LU factorization of an M-by-N panel A
without pivoting (no row interchanges are performed).
This routine can deal only with panels of width up to 32 (and, for widths above 16, height up to 512).
The factorization has the form
A = L * U
where L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
extern "C" magma_int_t
magma_sgetf2_nopiv_internal_batched(
magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
magma_int_t* info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue )
{
#define dAarray(i,j) dA_array, i, j
magma_int_t arginfo = 0;
if (m < 0) {
arginfo = -1;
} else if (n < 0 || n > 32 || (m > 512 && n > 16) ) {
arginfo = -2;
} else if (ai < 0) {
arginfo = -4;
} else if (aj < 0) {
arginfo = -5;
} else if (ldda < max(1,m)) {
arginfo = -6;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magma_int_t m1 = (m > MAX_NTHREADS) ? MAX_NTHREADS : m;
magma_int_t m2 = m - m1;
const magma_int_t ntcol = (m1 > 32) ? 1 : (2 * (32/m1));
magma_int_t shmem = ntcol * magma_ceilpow2(n) * sizeof(float);
magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 threads(m1, ntcol, 1);
dim3 grid(gridx, 1, 1);
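// Example configuration (for illustration only): with m = n = 16 and batchCount = 1000,
// m1 = 16, ntcol = 2*(32/16) = 4, shmem = 4 * 16 * sizeof(float) = 256 bytes,
// gridx = ceil(1000/4) = 250, and threads = (16, 4, 1).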
switch(n){
case 1: sgetf2_nopiv_batched_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 2: sgetf2_nopiv_batched_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 3: sgetf2_nopiv_batched_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 4: sgetf2_nopiv_batched_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 5: sgetf2_nopiv_batched_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 6: sgetf2_nopiv_batched_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 7: sgetf2_nopiv_batched_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 8: sgetf2_nopiv_batched_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 9: sgetf2_nopiv_batched_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 10: sgetf2_nopiv_batched_kernel<10, magma_ceilpow2(10)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 11: sgetf2_nopiv_batched_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 12: sgetf2_nopiv_batched_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 13: sgetf2_nopiv_batched_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 14: sgetf2_nopiv_batched_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 15: sgetf2_nopiv_batched_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 16: sgetf2_nopiv_batched_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 17: sgetf2_nopiv_batched_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 18: sgetf2_nopiv_batched_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 19: sgetf2_nopiv_batched_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 20: sgetf2_nopiv_batched_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 21: sgetf2_nopiv_batched_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 22: sgetf2_nopiv_batched_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 23: sgetf2_nopiv_batched_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 24: sgetf2_nopiv_batched_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 25: sgetf2_nopiv_batched_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 26: sgetf2_nopiv_batched_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 27: sgetf2_nopiv_batched_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 28: sgetf2_nopiv_batched_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 29: sgetf2_nopiv_batched_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 30: sgetf2_nopiv_batched_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 31: sgetf2_nopiv_batched_kernel<31, magma_ceilpow2(31)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
case 32: sgetf2_nopiv_batched_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(m1, dA_array, ai, aj, ldda, info_array, gbstep, batchCount); break;
default: printf("error: panel width %lld is not supported\n", (long long) n);
}
if(m2 > 0){
magmablas_strsm_recursive_batched(
MagmaRight, MagmaUpper, MagmaNoTrans, MagmaNonUnit,
m2, n, MAGMA_S_ONE,
dAarray(ai ,aj), ldda,
dAarray(ai+m1,aj), ldda, batchCount, queue );
}
#undef dAarray
return arginfo;
}
|
826ef4a84a662edb8af53ab117f4295482a73472.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "structs.cuh"
#include "semaphore.h"
#include "LBMconsts.cuh"
#include "phys.h"
#include "compact-steps.cuh"
#include "lbm-steps.cuh"
void calcLBM(int it, std::vector<double>& timings);
void calcConeFold(int it, std::vector<double>& timings);
void simple_drop();
void debug_print();
void calcStep(int REV=1){
cuTimer calct;
parsHost.iStep++;
std::vector<double> timings;
int Ntiles=0;
calcConeFold(parsHost.iStep, timings);
copy2dev( parsHost, pars );
copy2dev( PPhost, PPdev );
double phys_time=parsHost.iStep;
double calc_time = calct.gettime();
printf("Step %6d (physical time %6.3f ms) | Performance: %.2f ms (%.2f MLU/sec) | timings: ",
parsHost.iStep ,phys_time, calc_time,
(unsigned long)Nx*Ny*Nz*parsHost.Nt/calc_time*1e-3 );
for(auto tmg: timings) printf("%.2f ",tmg);
printf("\n");
}
void calcLBM(int it, std::vector<double>& timings){
cuTimer t0;
using namespace CompStep;
const size_t shmem_size = 48*1024;
CHECK_ERROR(hipFuncSetAttribute(compactStep<0>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem_size));
CHECK_ERROR(hipFuncSetAttribute(compactStep<1>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem_size));
hipLaunchKernelGGL(( compactStep<0>), dim3(dim3(Nx/Nb.x,Ny/Nb.y,Nz/Nb.z)),dim3(dim3(Nb.x,Nb.y,Nb.z)),shmem_size, 0, );
hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() );
debug_print(); timings.push_back( t0.getlaptime() );
hipLaunchKernelGGL(( compactStep<1>), dim3(dim3(Nx/Nb.x,Ny/Nb.y,Nz/Nb.z)),dim3(dim3(Nb.x,Nb.y,Nb.z)),shmem_size, 0, );
hipDeviceSynchronize(); CHECK_ERROR( hipGetLastError() );
debug_print(); timings.push_back( t0.getlaptime() );
}
void debug_print(){
return;
}
|
826ef4a84a662edb8af53ab117f4295482a73472.cu
|
#include "structs.cuh"
#include "semaphore.h"
#include "LBMconsts.cuh"
#include "phys.h"
#include "compact-steps.cuh"
#include "lbm-steps.cuh"
void calcLBM(int it, std::vector<double>& timings);
void calcConeFold(int it, std::vector<double>& timings);
void simple_drop();
void debug_print();
void calcStep(int REV=1){
cuTimer calct;
parsHost.iStep++;
std::vector<double> timings;
int Ntiles=0;
calcConeFold(parsHost.iStep, timings);
copy2dev( parsHost, pars );
copy2dev( PPhost, PPdev );
double phys_time=parsHost.iStep;
double calc_time = calct.gettime();
printf("Step %6d (physical time %6.3f ms) | Performance: %.2f ms (%.2f MLU/sec) | timings: ",
parsHost.iStep ,phys_time, calc_time,
(unsigned long)Nx*Ny*Nz*parsHost.Nt/calc_time*1e-3 );
for(auto tmg: timings) printf("%.2f ",tmg);
printf("\n");
}
void calcLBM(int it, std::vector<double>& timings){
cuTimer t0;
using namespace CompStep;
const size_t shmem_size = 48*1024;
CHECK_ERROR(cudaFuncSetAttribute(compactStep<0>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem_size));
CHECK_ERROR(cudaFuncSetAttribute(compactStep<1>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem_size));
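// Note: the attribute calls above set the per-kernel cap on dynamic shared memory so
// that the kernels may be launched with shmem_size bytes of dynamic shared memory
// (the call becomes mandatory once the request exceeds the default 48 KB limit).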
compactStep<0><<<dim3(Nx/Nb.x,Ny/Nb.y,Nz/Nb.z),dim3(Nb.x,Nb.y,Nb.z),shmem_size>>>();
cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() );
debug_print(); timings.push_back( t0.getlaptime() );
compactStep<1><<<dim3(Nx/Nb.x,Ny/Nb.y,Nz/Nb.z),dim3(Nb.x,Nb.y,Nb.z),shmem_size>>>();
cudaDeviceSynchronize(); CHECK_ERROR( cudaGetLastError() );
debug_print(); timings.push_back( t0.getlaptime() );
}
void debug_print(){
return;
}
|
f71d8515a08e519682adfb6518c885667e4dc669.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.hpp"
// Sequential code for the forward path of the convolution layer
// You should not modify this code
static void conv_forward_valid(const float *X, const shape &xdims, const float *W, const shape &wdims, float *Y,
const shape &ydims) {
std::fill(Y, Y + ydims.flattened_length(), 0);
for (auto i : range(0, ydims.num)) {
for (auto m : range(0, ydims.depth )) { // for each output feature map
for (auto h : range(0, ydims.height)) { // for each output element
for (auto w : range(0, ydims.width )) {
const auto yoffset = ((i * ydims.depth + m) * ydims.height + h) * ydims.width + w;
for (auto c : range(0, xdims.depth )) { // sum over all input feature maps
for (auto p : range(0, wdims.height)) { // filter height
for (auto q : range(0, wdims.width )) { // filter width
const auto xoffset = ((((i * xdims.depth) + c) * xdims.height) + (h + p)) * xdims.width + (w + q);
const auto woffset = ((((m * wdims.depth) + c) * wdims.height) + p) * wdims.width + q;
Y[yoffset] += X[xoffset] * W[woffset];
}
}
}
}
}
}
}
}
// Baseline GPU kernel code for forward convolution.
// One thread per input image (grid-stride over ydims.num); each thread computes all outputs of its image
// You should not modify this kernel as it is used for correctness comparison.
// Instead, define a new one below
__global__ void conv_forward_baseline_kernel(const float *X, const shape xdims, const float *W, const shape wdims, float *Y,
const shape ydims) {
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gx; i < ydims.num * ydims.depth * ydims.height * ydims.width; i += blockDim.x * gridDim.x) {
Y[i] = 0.f;
}
for (size_t i = gx; i < ydims.num; i += gridDim.x * blockDim.x) {
for (auto m : range(0, ydims.depth )) { // for each output feature map
for (auto h : range(0, ydims.height)) { // for each output element
for (auto w : range(0, ydims.width )) {
const size_t yoffset = ((i * ydims.depth + m) * ydims.height + h) * ydims.width + w;
for (auto c : range(0, xdims.depth )) { // sum over all input feature maps
for (auto p : range(0, wdims.height)) { // filter height
for (auto q : range(0, wdims.width )) { // filter width
const size_t xoffset = ((((i * xdims.depth) + c) * xdims.height) + (h + p)) * xdims.width + (w + q);
const size_t woffset = ((((m * wdims.depth) + c) * wdims.height) + p) * wdims.width + q;
Y[yoffset] += X[xoffset] * W[woffset];
}
}
}
}
}
}
}
}
// Host code to configure baseline GPU kernel
static void convlayer_gpu_baseline(const float *X, const shape &xdims, const float *W, const shape &wdims, float *Y,
const shape &ydims) {
dim3 dimGrid(1);
dim3 dimBlock(32);
hipLaunchKernelGGL(( conv_forward_baseline_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, X, xdims, W, wdims, Y, ydims);
THROW_IF_ERROR(hipGetLastError());
}
// Implement your optimized kernel here.
// Make any modifications you wish.
// Don't forget to modify the host code below, if needed!
__global__ void conv_forward_opt_kernel(const float *X, const shape xdims, const float *W, const shape wdims, float *Y,
const shape ydims) {
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gx; i < ydims.num * ydims.depth * ydims.height * ydims.width; i += blockDim.x * gridDim.x) {
Y[i] = 0.f;
}
//@@ YOUR CODE HERE!
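  // A minimal sketch of one possible approach (an assumption, not the required answer):
  // one output element per thread (grid-stride) instead of one image per thread as in
  // the baseline, with each thread accumulating its own sum in a register.
  for (size_t i = gx; i < ydims.num * ydims.depth * ydims.height * ydims.width; i += blockDim.x * gridDim.x) {
    const size_t w =  i % ydims.width;
    const size_t h = (i / ydims.width) % ydims.height;
    const size_t m = (i / (ydims.width * ydims.height)) % ydims.depth;
    const size_t n =  i / (ydims.width * ydims.height * ydims.depth);
    float sum = 0.f;
    for (size_t c = 0; c < xdims.depth; ++c) {      // sum over all input feature maps
      for (size_t p = 0; p < wdims.height; ++p) {   // filter height
        for (size_t q = 0; q < wdims.width; ++q) {  // filter width
          const size_t xoffset = (((n * xdims.depth + c) * xdims.height) + (h + p)) * xdims.width + (w + q);
          const size_t woffset = (((m * wdims.depth + c) * wdims.height) + p) * wdims.width + q;
          sum += X[xoffset] * W[woffset];
        }
      }
    }
    Y[i] = sum;
  }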
}
// Host code to configure your optimized GPU kernel
static void convlayer_gpu_opt(const float *X, const shape &xdims, const float *W, const shape &wdims, float *Y,
const shape &ydims) {
// Modify this code to configure your optimized kernel.
//@@ YOUR CODE HERE!!!
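  // One possible configuration (a sketch, assuming the one-thread-per-output sketch above):
  //   dim3 dimBlock(256);
  //   dim3 dimGrid((ydims.flattened_length() + dimBlock.x - 1) / dimBlock.x);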
dim3 dimGrid(1);
dim3 dimBlock(32);
hipLaunchKernelGGL(( conv_forward_opt_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, X, xdims, W, wdims, Y, ydims);
THROW_IF_ERROR(hipGetLastError());
}
static int eval(const shape wDims, const shape xDims, bool doVerify) {
// Generate model
const auto conf_info = std::string("conv[wDims:") + std::to_string(wDims.num) + "," +
std::to_string(wDims.depth) + "," +
std::to_string(wDims.height) + "," +
std::to_string(wDims.width) +
" xDims:" + std::to_string(xDims.num) + "," +
std::to_string(xDims.depth) + "," +
std::to_string(xDims.height) + "," +
std::to_string(xDims.width) + "]";
INFO("Running " << conf_info);
// Generate convolution weights
float *hostW = allocate<float>(wDims);
generate_convfilters(hostW, wDims);
// generate input feature map
float *hostX = allocate<float>(xDims);
generate_data(hostX, xDims);
// generate output feature map for verification
const shape ydims = {xDims.num, wDims.num, (xDims.height - wDims.height + 1),
(xDims.width - wDims.width + 1)};
INFO("Allocating output tensor [" << ydims.num << "," << ydims.depth << "," << ydims.height << "," << ydims.width << "]");
float *hostY = allocate<float>(ydims);
float *expected = allocate<float>(ydims);
generate_data(hostY, ydims);
const size_t wByteCount = wDims.flattened_length() * sizeof(float);
const size_t xByteCount = xDims.flattened_length() * sizeof(float);
const size_t yByteCount = ydims.flattened_length() * sizeof(float);
float *deviceW = nullptr, *deviceX = nullptr, *deviceY = nullptr;
timer_start("Allocating GPU memory.");
THROW_IF_ERROR(hipMalloc((void **)&deviceW, wByteCount));
THROW_IF_ERROR(hipMalloc((void **)&deviceX, xByteCount));
THROW_IF_ERROR(hipMalloc((void **)&deviceY, yByteCount));
timer_stop();
timer_start("Copying inputs to the GPU.");
THROW_IF_ERROR(hipMemcpy(deviceW, hostW, wByteCount, hipMemcpyDefault));
THROW_IF_ERROR(hipMemcpy(deviceX, hostX, xByteCount, hipMemcpyDefault));
timer_stop();
//////////////////////////////////////////
// GPU Gather Computation
//////////////////////////////////////////
timer_start("Performing GPU convlayer");
convlayer_gpu_opt(deviceX, xDims, deviceW, wDims, deviceY, ydims);
THROW_IF_ERROR(hipDeviceSynchronize());
timer_stop();
// verify with provided implementation
if (doVerify) {
timer_start("Copying output to the CPU");
THROW_IF_ERROR(hipMemcpy(hostY, deviceY, yByteCount, hipMemcpyDefault));
timer_stop();
convlayer_gpu_baseline(deviceX, xDims, deviceW, wDims, deviceY, ydims);
THROW_IF_ERROR(hipDeviceSynchronize());
THROW_IF_ERROR(hipMemcpy(expected, deviceY, yByteCount, hipMemcpyDefault));
// conv_forward_valid(hostX, xDims, hostW, wDims, expected, ydims);
verify(expected, hostY, ydims);
}
THROW_IF_ERROR(hipFree(deviceW));
THROW_IF_ERROR(hipFree(deviceX));
THROW_IF_ERROR(hipFree(deviceY));
free(hostW);
free(hostX);
free(hostY);
free(expected);
return 0;
}
TEST_CASE("Convlayer", "[convlayer]") {
#if 1
// test five times in case code errors depend on data
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
#else
SECTION("[wDims:32,1,5,5 xDims:50000,1,28,28]") {
eval({32,1,5,5}, {50000,1,28,28}, false);
}
#endif
}
|
f71d8515a08e519682adfb6518c885667e4dc669.cu
|
#include "helper.hpp"
// Sequential code for the forward path of the convolution layer
// You should not modify this code
static void conv_forward_valid(const float *X, const shape &xdims, const float *W, const shape &wdims, float *Y,
const shape &ydims) {
std::fill(Y, Y + ydims.flattened_length(), 0);
for (auto i : range(0, ydims.num)) {
for (auto m : range(0, ydims.depth )) { // for each output feature map
for (auto h : range(0, ydims.height)) { // for each output element
for (auto w : range(0, ydims.width )) {
const auto yoffset = ((i * ydims.depth + m) * ydims.height + h) * ydims.width + w;
for (auto c : range(0, xdims.depth )) { // sum over all input feature maps
for (auto p : range(0, wdims.height)) { // filter height
for (auto q : range(0, wdims.width )) { // filter width
const auto xoffset = ((((i * xdims.depth) + c) * xdims.height) + (h + p)) * xdims.width + (w + q);
const auto woffset = ((((m * wdims.depth) + c) * wdims.height) + p) * wdims.width + q;
Y[yoffset] += X[xoffset] * W[woffset];
}
}
}
}
}
}
}
}
// Baseline GPU kernel code for forward convolution.
// One thread per output index
// You should not modify this kernel as it is used for correctness comparison.
// Instead, define a new one below
__global__ void conv_forward_baseline_kernel(const float *X, const shape xdims, const float *W, const shape wdims, float *Y,
const shape ydims) {
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gx; i < ydims.num * ydims.depth * ydims.height * ydims.width; i += blockDim.x * gridDim.x) {
Y[i] = 0.f;
}
for (size_t i = gx; i < ydims.num; i += gridDim.x * blockDim.x) {
for (auto m : range(0, ydims.depth )) { // for each output feature map
for (auto h : range(0, ydims.height)) { // for each output element
for (auto w : range(0, ydims.width )) {
const size_t yoffset = ((i * ydims.depth + m) * ydims.height + h) * ydims.width + w;
for (auto c : range(0, xdims.depth )) { // sum over all input feature maps
for (auto p : range(0, wdims.height)) { // filter height
for (auto q : range(0, wdims.width )) { // filter width
const size_t xoffset = ((((i * xdims.depth) + c) * xdims.height) + (h + p)) * xdims.width + (w + q);
const size_t woffset = ((((m * wdims.depth) + c) * wdims.height) + p) * wdims.width + q;
Y[yoffset] += X[xoffset] * W[woffset];
}
}
}
}
}
}
}
}
// Host code to configure baseline GPU kernel
static void convlayer_gpu_baseline(const float *X, const shape &xdims, const float *W, const shape &wdims, float *Y,
const shape &ydims) {
dim3 dimGrid(1);
dim3 dimBlock(32);
conv_forward_baseline_kernel<<<dimGrid, dimBlock>>>(X, xdims, W, wdims, Y, ydims);
THROW_IF_ERROR(cudaGetLastError());
}
// Implement your optimized kernel here.
// Make any modifications you wish.
// Don't forget to modify the host code below, if needed!
__global__ void conv_forward_opt_kernel(const float *X, const shape xdims, const float *W, const shape wdims, float *Y,
const shape ydims) {
const size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gx; i < ydims.num * ydims.depth * ydims.height * ydims.width; i += blockDim.x * gridDim.x) {
Y[i] = 0.f;
}
//@@ YOUR CODE HERE!
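  // A minimal sketch of one possible approach (an assumption, not the required answer):
  // one output element per thread (grid-stride) instead of one image per thread as in
  // the baseline, with each thread accumulating its own sum in a register.
  for (size_t i = gx; i < ydims.num * ydims.depth * ydims.height * ydims.width; i += blockDim.x * gridDim.x) {
    const size_t w =  i % ydims.width;
    const size_t h = (i / ydims.width) % ydims.height;
    const size_t m = (i / (ydims.width * ydims.height)) % ydims.depth;
    const size_t n =  i / (ydims.width * ydims.height * ydims.depth);
    float sum = 0.f;
    for (size_t c = 0; c < xdims.depth; ++c) {      // sum over all input feature maps
      for (size_t p = 0; p < wdims.height; ++p) {   // filter height
        for (size_t q = 0; q < wdims.width; ++q) {  // filter width
          const size_t xoffset = (((n * xdims.depth + c) * xdims.height) + (h + p)) * xdims.width + (w + q);
          const size_t woffset = (((m * wdims.depth + c) * wdims.height) + p) * wdims.width + q;
          sum += X[xoffset] * W[woffset];
        }
      }
    }
    Y[i] = sum;
  }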
}
// Host code to configure your optimized GPU kernel
static void convlayer_gpu_opt(const float *X, const shape &xdims, const float *W, const shape &wdims, float *Y,
const shape &ydims) {
// Modify this code to configure your optimized kernel.
//@@ YOUR CODE HERE!!!
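  // One possible configuration (a sketch, assuming the one-thread-per-output sketch above):
  //   dim3 dimBlock(256);
  //   dim3 dimGrid((ydims.flattened_length() + dimBlock.x - 1) / dimBlock.x);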
dim3 dimGrid(1);
dim3 dimBlock(32);
conv_forward_opt_kernel<<<dimGrid, dimBlock>>>(X, xdims, W, wdims, Y, ydims);
THROW_IF_ERROR(cudaGetLastError());
}
static int eval(const shape wDims, const shape xDims, bool doVerify) {
// Generate model
const auto conf_info = std::string("conv[wDims:") + std::to_string(wDims.num) + "," +
std::to_string(wDims.depth) + "," +
std::to_string(wDims.height) + "," +
std::to_string(wDims.width) +
" xDims:" + std::to_string(xDims.num) + "," +
std::to_string(xDims.depth) + "," +
std::to_string(xDims.height) + "," +
std::to_string(xDims.width) + "]";
INFO("Running " << conf_info);
// Generate convolution weights
float *hostW = allocate<float>(wDims);
generate_convfilters(hostW, wDims);
// generate input feature map
float *hostX = allocate<float>(xDims);
generate_data(hostX, xDims);
// generate output feature map for verification
const shape ydims = {xDims.num, wDims.num, (xDims.height - wDims.height + 1),
(xDims.width - wDims.width + 1)};
INFO("Allocating output tensor [" << ydims.num << "," << ydims.depth << "," << ydims.height << "," << ydims.width << "]");
float *hostY = allocate<float>(ydims);
float *expected = allocate<float>(ydims);
generate_data(hostY, ydims);
const size_t wByteCount = wDims.flattened_length() * sizeof(float);
const size_t xByteCount = xDims.flattened_length() * sizeof(float);
const size_t yByteCount = ydims.flattened_length() * sizeof(float);
float *deviceW = nullptr, *deviceX = nullptr, *deviceY = nullptr;
timer_start("Allocating GPU memory.");
THROW_IF_ERROR(cudaMalloc((void **)&deviceW, wByteCount));
THROW_IF_ERROR(cudaMalloc((void **)&deviceX, xByteCount));
THROW_IF_ERROR(cudaMalloc((void **)&deviceY, yByteCount));
timer_stop();
timer_start("Copying inputs to the GPU.");
THROW_IF_ERROR(cudaMemcpy(deviceW, hostW, wByteCount, cudaMemcpyDefault));
THROW_IF_ERROR(cudaMemcpy(deviceX, hostX, xByteCount, cudaMemcpyDefault));
timer_stop();
//////////////////////////////////////////
// GPU Gather Computation
//////////////////////////////////////////
timer_start("Performing GPU convlayer");
convlayer_gpu_opt(deviceX, xDims, deviceW, wDims, deviceY, ydims);
THROW_IF_ERROR(cudaDeviceSynchronize());
timer_stop();
// verify with provided implementation
if (doVerify) {
timer_start("Copying output to the CPU");
THROW_IF_ERROR(cudaMemcpy(hostY, deviceY, yByteCount, cudaMemcpyDefault));
timer_stop();
convlayer_gpu_baseline(deviceX, xDims, deviceW, wDims, deviceY, ydims);
THROW_IF_ERROR(cudaDeviceSynchronize());
THROW_IF_ERROR(cudaMemcpy(expected, deviceY, yByteCount, cudaMemcpyDefault));
// conv_forward_valid(hostX, xDims, hostW, wDims, expected, ydims);
verify(expected, hostY, ydims);
}
THROW_IF_ERROR(cudaFree(deviceW));
THROW_IF_ERROR(cudaFree(deviceX));
THROW_IF_ERROR(cudaFree(deviceY));
free(hostW);
free(hostX);
free(hostY);
free(expected);
return 0;
}
TEST_CASE("Convlayer", "[convlayer]") {
#if 1
// test five times in case code errors depend on data
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
SECTION("[wDims:32,1,5,5 xDims:20,1,28,28]") {
eval({32,1,5,5}, {20,1,28,28}, true);
}
#else
SECTION("[wDims:32,1,5,5 xDims:50000,1,28,28]") {
eval({32,1,5,5}, {50000,1,28,28}, false);
}
#endif
}
|
5aded30fae16e933fbd6dd1793517aec17da1fd1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <ctime>
#define TILE_WIDTH 20
#define WIDTH 10000
#define MATSIZE 256
#define SIZE (WIDTH * WIDTH * sizeof(float))
float A[WIDTH][WIDTH], B[WIDTH][WIDTH], C[WIDTH][WIDTH];
float *dev_A, *dev_B, *dev_C;
__global__ void matmul(float *A, float *B, float *C) {
__shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float sh_B[TILE_WIDTH][TILE_WIDTH];
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if (row >= WIDTH || col >= WIDTH)
return;
    float partial_val = 0.f;   // the inputs are floats, so accumulate in float
for (int tile_id=0; tile_id<WIDTH/TILE_WIDTH; ++tile_id) {
sh_A[threadIdx.y][threadIdx.x] = A[row * WIDTH + (tile_id * TILE_WIDTH + threadIdx.x)];
sh_B[threadIdx.y][threadIdx.x] = B[col + (tile_id * TILE_WIDTH + threadIdx.y) * WIDTH];
__syncthreads();
        for (int i=0; i<TILE_WIDTH; ++i) {
            partial_val += sh_A[threadIdx.y][i] * sh_B[i][threadIdx.x];
        }
        __syncthreads();   // whole block must finish with this tile before the next load
}
C[row * WIDTH + col] = partial_val;
}
std::string getDimString(dim3 blocks, dim3 threads) {
std::stringstream ss;
ss << "BLOCKS: (" << blocks.x << ", " << blocks.y << ")";
ss << " THREADS: (" << threads.x << ", " << threads.y << ")";
return ss.str();
}
int main () {
std::cout << "Not Done" << std::endl;
for (int i=0; i<WIDTH; ++i) {
for (int j=0; j<WIDTH; ++j) {
A[i][j] = B[i][j] = 2.5;
}
}
hipMalloc(&dev_A, SIZE);
hipMalloc(&dev_B, SIZE);
hipMalloc(&dev_C, SIZE);
hipMemcpy(dev_A, A, SIZE, hipMemcpyHostToDevice);
hipMemcpy(dev_B, B, SIZE, hipMemcpyHostToDevice);
hipMemcpy(dev_C, C, SIZE, hipMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
dim3 blocksPerGrid(WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
std::cout << "Calculating GPU" << std::endl;
std::clock_t t = std::clock();
hipLaunchKernelGGL(( matmul), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_A, dev_B, dev_C);
hipDeviceSynchronize();
t = std::clock() - t;
    std::ofstream out{"time_logs.log", std::ios_base::app};
    const double seconds = double(t) / double(CLOCKS_PER_SEC);
    std::cout << std::setprecision(5) << getDimString(blocksPerGrid, threadsPerBlock) <<
              ", TIME: " << seconds << std::endl;
    out << std::setprecision(5) << getDimString(blocksPerGrid, threadsPerBlock) <<
        ", TIME: " << seconds << std::endl;   // record the timing in the log file as well
hipMemcpy(C, dev_C, SIZE, hipMemcpyDeviceToHost);
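    // Simple sanity check (a sketch): with every input element set to 2.5f, each output
    // element should equal 2.5f * 2.5f * WIDTH, and that value is exactly representable.
    const float expected = 2.5f * 2.5f * WIDTH;
    if (C[0][0] != expected || C[WIDTH - 1][WIDTH - 1] != expected) {
        std::cout << "Unexpected result: " << C[0][0] << " / " << C[WIDTH - 1][WIDTH - 1] << std::endl;
    }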
// std::cout << "Calculating CPU" << std::endl;
// t = std::clock();
// for (int i=0; i<WIDTH; ++i) {
// for (int j=0; j<WIDTH; ++j) {
// int partial_val{0};
// for (int k=0; k<WIDTH; ++k) {
// partial_val += A[i][k] * B[k][j];
// }
// }
// }
// t = std::clock() - t;
// std::cout << std::setprecision(5) << double(t) / double(CLOCKS_PER_SEC) << std::endl;
out.close();
hipFree(dev_A); hipFree(dev_B); hipFree(dev_C);
std::cout << "Done!" << std::endl;
return 0;
}
|
5aded30fae16e933fbd6dd1793517aec17da1fd1.cu
|
#include <iostream>
#include <cuda_runtime.h>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <ctime>
#define TILE_WIDTH 20
#define WIDTH 10000
#define MATSIZE 256
#define SIZE (WIDTH * WIDTH * sizeof(float))
float A[WIDTH][WIDTH], B[WIDTH][WIDTH], C[WIDTH][WIDTH];
float *dev_A, *dev_B, *dev_C;
__global__ void matmul(float *A, float *B, float *C) {
__shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float sh_B[TILE_WIDTH][TILE_WIDTH];
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if (row >= WIDTH || col >= WIDTH)
return;
    float partial_val = 0.f;   // the inputs are floats, so accumulate in float
for (int tile_id=0; tile_id<WIDTH/TILE_WIDTH; ++tile_id) {
sh_A[threadIdx.y][threadIdx.x] = A[row * WIDTH + (tile_id * TILE_WIDTH + threadIdx.x)];
sh_B[threadIdx.y][threadIdx.x] = B[col + (tile_id * TILE_WIDTH + threadIdx.y) * WIDTH];
__syncthreads();
        for (int i=0; i<TILE_WIDTH; ++i) {
            partial_val += sh_A[threadIdx.y][i] * sh_B[i][threadIdx.x];
        }
        __syncthreads();   // whole block must finish with this tile before the next load
}
C[row * WIDTH + col] = partial_val;
}
std::string getDimString(dim3 blocks, dim3 threads) {
std::stringstream ss;
ss << "BLOCKS: (" << blocks.x << ", " << blocks.y << ")";
ss << " THREADS: (" << threads.x << ", " << threads.y << ")";
return ss.str();
}
int main () {
std::cout << "Not Done" << std::endl;
for (int i=0; i<WIDTH; ++i) {
for (int j=0; j<WIDTH; ++j) {
A[i][j] = B[i][j] = 2.5;
}
}
cudaMalloc(&dev_A, SIZE);
cudaMalloc(&dev_B, SIZE);
cudaMalloc(&dev_C, SIZE);
cudaMemcpy(dev_A, A, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dev_B, B, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dev_C, C, SIZE, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
dim3 blocksPerGrid(WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
std::cout << "Calculating GPU" << std::endl;
std::clock_t t = std::clock();
matmul<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_B, dev_C);
cudaDeviceSynchronize();
t = std::clock() - t;
    std::ofstream out{"time_logs.log", std::ios_base::app};
    const double seconds = double(t) / double(CLOCKS_PER_SEC);
    std::cout << std::setprecision(5) << getDimString(blocksPerGrid, threadsPerBlock) <<
              ", TIME: " << seconds << std::endl;
    out << std::setprecision(5) << getDimString(blocksPerGrid, threadsPerBlock) <<
        ", TIME: " << seconds << std::endl;   // record the timing in the log file as well
cudaMemcpy(C, dev_C, SIZE, cudaMemcpyDeviceToHost);
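    // Simple sanity check (a sketch): with every input element set to 2.5f, each output
    // element should equal 2.5f * 2.5f * WIDTH, and that value is exactly representable.
    const float expected = 2.5f * 2.5f * WIDTH;
    if (C[0][0] != expected || C[WIDTH - 1][WIDTH - 1] != expected) {
        std::cout << "Unexpected result: " << C[0][0] << " / " << C[WIDTH - 1][WIDTH - 1] << std::endl;
    }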
// std::cout << "Calculating CPU" << std::endl;
// t = std::clock();
// for (int i=0; i<WIDTH; ++i) {
// for (int j=0; j<WIDTH; ++j) {
// int partial_val{0};
// for (int k=0; k<WIDTH; ++k) {
// partial_val += A[i][k] * B[k][j];
// }
// }
// }
// t = std::clock() - t;
// std::cout << std::setprecision(5) << double(t) / double(CLOCKS_PER_SEC) << std::endl;
out.close();
cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C);
std::cout << "Done!" << std::endl;
return 0;
}
|
2c143c22743d706897563624136d7f67e77acff7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define BLOCK_SIZE 1024
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void multiplyBy2(int* data, unsigned int n) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) {
data[tid] = 2 * data[tid];
}
}
template<typename T>
std::vector<T>* getUniqueValues(std::vector<T>* input) {
std::vector<T>* uniqueValues = new std::vector<T>(*input);
std::sort(uniqueValues->begin(), uniqueValues->end());
auto ip = std::unique(uniqueValues->begin(), uniqueValues->end());
auto begin = uniqueValues->begin();
uniqueValues->resize(std::distance(begin, ip));
return uniqueValues;
}
template<typename T>
thrust::host_vector<T>* getHostVector(std::vector<T>* input) {
thrust::host_vector<T>* host_vector = new thrust::host_vector<T>();
for (auto it = input->begin(); it != input->end(); ++it) {
host_vector->push_back(*it);
}
return host_vector;
}
__host__ __device__ void variations_without_repetitions_count(int n, int k, unsigned long long* result) {
if (k > n) {
*result = 1;
return;
}
*result = 1;
for (int i = n; i > n - k; i--) {
*result *= i;
}
}
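// Unranks `variationNumber`: produces the k-element variation (ordered selection without
// repetition) of {0, ..., n-1} with that index by repeatedly dividing the remaining rank
// by the number of variations of the remaining positions and picking the corresponding
// still-free element.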
__host__ __device__ void variation(int n, int k, int variationNumber, int* result) {
bool* isTaken = new bool[n];
for (int i = 0; i < n; i++) {
isTaken[i] = false;
}
for (int x = 0; x < k ; x++) {
unsigned long long v = 0;
variations_without_repetitions_count(n - x - 1, k - x - 1, &v);
auto t = variationNumber / v;
int searchedPosition = -1;
int realPosition = 0;
for (int i = 0; i < n; i++) {
if (!isTaken[i]) {
searchedPosition++;
if (t == searchedPosition) {
realPosition = i;
break;
}
}
}
isTaken[realPosition] = true;
result[x] = realPosition;
variationNumber %= v;
}
    delete[] isTaken;
}
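// Brute-force matcher: each thread unranks one candidate assignment of pattern symbols to
// distinct sequence values, substitutes it into the pattern, and records in `result`
// whether the substituted pattern occurs as a (not necessarily contiguous) subsequence of `seq`.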
__global__ void findSubstitution(
char* patternValues, int patternValuesSize,
int* seqValues, int seqValuesSize,
char* pattern, int patternSize,
int* seq, int seqSize,
int* result, unsigned long long variationCount) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= variationCount) return;
int* variationResult = new int[patternValuesSize];
variation(seqValuesSize, patternValuesSize, index, variationResult);
int* patternWithValues = new int[patternSize];
for (int i = 0; i < patternValuesSize; i++) {
for (int j = 0; j < patternSize; j++) {
if (patternValues[i] == pattern[j]) {
patternWithValues[j] = seqValues[variationResult[i]];
}
}
}
int patternIndex = 0;
for (int i = 0; i < seqSize && patternIndex < patternSize; i++) {
if (seq[i] == patternWithValues[patternIndex]) {
patternIndex++;
}
}
    result[index] = (patternIndex == patternSize) ? 1 : 0;
    // free the per-thread scratch buffers allocated with device-side new[]
    delete[] variationResult;
    delete[] patternWithValues;
}
int main()
{
std::vector<int> seq = { 1,2, 4, 3, 5, 3, 6, 2, 1 };
std::vector<char> pattern = { 'a', 'b', 'b', 'a' };
thrust::host_vector<char>* patternValues = getHostVector(getUniqueValues(&pattern));
thrust::host_vector<char>* thrustPattern = getHostVector(&pattern);
thrust::host_vector<int>* seqValues = getHostVector(getUniqueValues(&seq));
thrust::host_vector<int>* thrustSeq = getHostVector(&seq);
thrust::host_vector<int>* result = new thrust::host_vector<int>();
thrust::device_vector<char>* devPatternValues = new thrust::device_vector<char>();
thrust::device_vector<char>* devThrustPattern = new thrust::device_vector<char>();
thrust::device_vector<int>* devSeqValues = new thrust::device_vector<int>();
thrust::device_vector<int>* devThrustSeq = new thrust::device_vector<int>();
thrust::device_vector<int>* devResult = new thrust::device_vector<int>();
unsigned long long variationCount = 0;
variations_without_repetitions_count(seqValues->size(), patternValues->size(), &variationCount);
    int gridSize = (variationCount + BLOCK_SIZE - 1) / BLOCK_SIZE;   // round up so every variation gets a thread
if (gridSize < 1) {
gridSize = 1;
}
devPatternValues->resize(patternValues->size());
devThrustPattern->resize(thrustPattern->size());
devSeqValues->resize(seqValues->size());
devThrustSeq->resize(thrustSeq->size());
result->resize(variationCount);
devResult->resize(variationCount);
*devPatternValues = *patternValues;
*devThrustPattern = *thrustPattern;
*devSeqValues = *seqValues;
*devThrustSeq = *thrustSeq;
*devResult = *result;
hipLaunchKernelGGL(( findSubstitution) , dim3(gridSize), dim3(BLOCK_SIZE) , 0, 0,
devPatternValues->data().get(), devPatternValues->size(),
devSeqValues->data().get(), devSeqValues->size(),
devThrustPattern->data().get(), devThrustPattern->size(),
devThrustSeq->data().get(), devThrustSeq->size(),
devResult->data().get(), variationCount);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::cout << "cuda error: " << hipGetErrorString(err) << std::endl;
return 1;
}
*result = *devResult;
for (int i = 0; i < result->size(); i++) {
if ((*result)[i] != 0) {
int* variationResult = new int[patternValues->size()];
variation(seqValues->size(), patternValues->size(), i, variationResult);
            for (int j = 0; j < patternValues->size(); j++) {
                std::cout << (*patternValues)[j] << "=" << (*seqValues)[variationResult[j]] << " ";
            }
            std::cout << std::endl;
            delete[] variationResult;
}
}
return 0;
}
|
2c143c22743d706897563624136d7f67e77acff7.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define BLOCK_SIZE 1024
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void multiplyBy2(int* data, unsigned int n) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) {
data[tid] = 2 * data[tid];
}
}
template<typename T>
std::vector<T>* getUniqueValues(std::vector<T>* input) {
std::vector<T>* uniqueValues = new std::vector<T>(*input);
std::sort(uniqueValues->begin(), uniqueValues->end());
auto ip = std::unique(uniqueValues->begin(), uniqueValues->end());
auto begin = uniqueValues->begin();
uniqueValues->resize(std::distance(begin, ip));
return uniqueValues;
}
template<typename T>
thrust::host_vector<T>* getHostVector(std::vector<T>* input) {
thrust::host_vector<T>* host_vector = new thrust::host_vector<T>();
for (auto it = input->begin(); it != input->end(); ++it) {
host_vector->push_back(*it);
}
return host_vector;
}
__host__ __device__ void variations_without_repetitions_count(int n, int k, unsigned long long* result) {
if (k > n) {
*result = 1;
return;
}
*result = 1;
for (int i = n; i > n - k; i--) {
*result *= i;
}
}
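// Unranks `variationNumber`: produces the k-element variation (ordered selection without
// repetition) of {0, ..., n-1} with that index by repeatedly dividing the remaining rank
// by the number of variations of the remaining positions and picking the corresponding
// still-free element.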
__host__ __device__ void variation(int n, int k, int variationNumber, int* result) {
bool* isTaken = new bool[n];
for (int i = 0; i < n; i++) {
isTaken[i] = false;
}
for (int x = 0; x < k ; x++) {
unsigned long long v = 0;
variations_without_repetitions_count(n - x - 1, k - x - 1, &v);
auto t = variationNumber / v;
int searchedPosition = -1;
int realPosition = 0;
for (int i = 0; i < n; i++) {
if (!isTaken[i]) {
searchedPosition++;
if (t == searchedPosition) {
realPosition = i;
break;
}
}
}
isTaken[realPosition] = true;
result[x] = realPosition;
variationNumber %= v;
}
    delete[] isTaken;
}
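// Brute-force matcher: each thread unranks one candidate assignment of pattern symbols to
// distinct sequence values, substitutes it into the pattern, and records in `result`
// whether the substituted pattern occurs as a (not necessarily contiguous) subsequence of `seq`.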
__global__ void findSubstitution(
char* patternValues, int patternValuesSize,
int* seqValues, int seqValuesSize,
char* pattern, int patternSize,
int* seq, int seqSize,
int* result, unsigned long long variationCount) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= variationCount) return;
int* variationResult = new int[patternValuesSize];
variation(seqValuesSize, patternValuesSize, index, variationResult);
int* patternWithValues = new int[patternSize];
for (int i = 0; i < patternValuesSize; i++) {
for (int j = 0; j < patternSize; j++) {
if (patternValues[i] == pattern[j]) {
patternWithValues[j] = seqValues[variationResult[i]];
}
}
}
int patternIndex = 0;
for (int i = 0; i < seqSize && patternIndex < patternSize; i++) {
if (seq[i] == patternWithValues[patternIndex]) {
patternIndex++;
}
}
    result[index] = (patternIndex == patternSize) ? 1 : 0;
    // free the per-thread scratch buffers allocated with device-side new[]
    delete[] variationResult;
    delete[] patternWithValues;
}
int main()
{
std::vector<int> seq = { 1,2, 4, 3, 5, 3, 6, 2, 1 };
std::vector<char> pattern = { 'a', 'b', 'b', 'a' };
thrust::host_vector<char>* patternValues = getHostVector(getUniqueValues(&pattern));
thrust::host_vector<char>* thrustPattern = getHostVector(&pattern);
thrust::host_vector<int>* seqValues = getHostVector(getUniqueValues(&seq));
thrust::host_vector<int>* thrustSeq = getHostVector(&seq);
thrust::host_vector<int>* result = new thrust::host_vector<int>();
thrust::device_vector<char>* devPatternValues = new thrust::device_vector<char>();
thrust::device_vector<char>* devThrustPattern = new thrust::device_vector<char>();
thrust::device_vector<int>* devSeqValues = new thrust::device_vector<int>();
thrust::device_vector<int>* devThrustSeq = new thrust::device_vector<int>();
thrust::device_vector<int>* devResult = new thrust::device_vector<int>();
unsigned long long variationCount = 0;
variations_without_repetitions_count(seqValues->size(), patternValues->size(), &variationCount);
    int gridSize = (variationCount + BLOCK_SIZE - 1) / BLOCK_SIZE;   // round up so every variation gets a thread
if (gridSize < 1) {
gridSize = 1;
}
devPatternValues->resize(patternValues->size());
devThrustPattern->resize(thrustPattern->size());
devSeqValues->resize(seqValues->size());
devThrustSeq->resize(thrustSeq->size());
result->resize(variationCount);
devResult->resize(variationCount);
*devPatternValues = *patternValues;
*devThrustPattern = *thrustPattern;
*devSeqValues = *seqValues;
*devThrustSeq = *thrustSeq;
*devResult = *result;
findSubstitution <<< gridSize, BLOCK_SIZE >>> (
devPatternValues->data().get(), devPatternValues->size(),
devSeqValues->data().get(), devSeqValues->size(),
devThrustPattern->data().get(), devThrustPattern->size(),
devThrustSeq->data().get(), devThrustSeq->size(),
devResult->data().get(), variationCount);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::cout << "cuda error: " << cudaGetErrorString(err) << std::endl;
return 1;
}
*result = *devResult;
for (int i = 0; i < result->size(); i++) {
if ((*result)[i] != 0) {
int* variationResult = new int[patternValues->size()];
variation(seqValues->size(), patternValues->size(), i, variationResult);
            for (int j = 0; j < patternValues->size(); j++) {
                std::cout << (*patternValues)[j] << "=" << (*seqValues)[variationResult[j]] << " ";
            }
            std::cout << std::endl;
            delete[] variationResult;
}
}
return 0;
}
|