Columns (all string-valued; per-value length ranges):
  hip_filename   : 5 to 84 characters
  hip_content    : 79 to 9.69M characters
  cuda_filename  : 4 to 83 characters
  cuda_content   : 19 to 9.69M characters
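Each row pairs a hipify-generated HIP source file with the original CUDA file it was translated from. A minimal sketch of the record layout implied by the four columns above, using plain C++ strings for illustration (the struct name and field types are assumptions, not part of the dataset):

#include <string>

// Illustrative sketch of one dataset row; field names mirror the columns above.
struct HipCudaPair {
    std::string hip_filename;   // e.g. "<hash>.hip", 5 to 84 characters
    std::string hip_content;    // hipified source, 79 characters to 9.69M
    std::string cuda_filename;  // e.g. "<hash>.cu", 4 to 83 characters
    std::string cuda_content;   // original CUDA source, 19 characters to 9.69M
};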
998d90e38892266ecb1800553be8e6ec52df05ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void kNMLUpdate1_kernel( int numAtoms, int paddedNumAtoms, float tau, float dt, float kT, float4 *posq, float4 *posqP, float4 *velm, long long *force, const float4 *__restrict__ random, unsigned int randomIndex ) { /* Update the velocity.*/ const float vscale = exp( -dt / tau ); const float fscale = ( 1.0f - vscale ) * tau; const float noisescale = sqrt( kT * ( 1 - vscale * vscale ) ); for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) { const float4 n = random[randomIndex + blockIdx.x * blockDim.x + threadIdx.x]; const float4 randomNoise = make_float4( n.x * noisescale, n.y * noisescale, n.z * noisescale, n.w * noisescale ); const float sqrtInvMass = sqrt( velm[atom].w ); float4 v = velm[atom]; float fx = ( float )force[atom] / ( float )0x100000000; float fy = ( float )force[atom + 1 * paddedNumAtoms] / ( float )0x100000000; float fz = ( float )force[atom + 2 * paddedNumAtoms] / ( float )0x100000000; v.x = ( vscale * v.x ) + ( fscale * fx * v.w ) + ( randomNoise.x * sqrtInvMass ); v.y = ( vscale * v.y ) + ( fscale * fy * v.w ) + ( randomNoise.y * sqrtInvMass ); v.z = ( vscale * v.z ) + ( fscale * fz * v.w ) + ( randomNoise.z * sqrtInvMass ); velm[atom] = v; } } extern "C" __global__ void kNMLUpdate2_kernel( int numAtoms, int numModes, float4 *velm, float4 *modes, float *modeWeights ) { extern __shared__ float dotBuffer[]; for( int mode = blockIdx.x; mode < numModes; mode += gridDim.x ) { /* Compute the projection of the mass weighted velocity onto one normal mode vector. */ float dot = 0.0f; for( int atom = threadIdx.x; atom < numAtoms; atom += blockDim.x ) { const int modePos = mode * numAtoms + atom; const float scale = 1.0f / sqrt( velm[atom].w ); float4 v = velm[atom]; float4 m = modes[modePos]; dot += scale * ( v.x * m.x + v.y * m.y + v.z * m.z ); } dotBuffer[threadIdx.x] = dot; __syncthreads(); if( threadIdx.x == 0 ) { float sum = 0; for( int i = 0; i < blockDim.x; i++ ) { sum += dotBuffer[i]; } modeWeights[mode] = sum; } } } extern "C" __global__ void kNMLUpdate3_kernel( int numAtoms, int numModes, float dt, float4 *posq, float4 *velm, float4 *modes, float *modeWeights, float4 *noiseVal ) { /* Load the weights into shared memory. */ extern __shared__ float weightBuffer[]; for( int mode = threadIdx.x; mode < numModes; mode += blockDim.x ) { weightBuffer[mode] = modeWeights[mode]; } __syncthreads(); /* Compute the projected velocities and update the atom positions. */ for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) { const float invMass = velm[atom].w, scale = sqrt( invMass ); float3 v = make_float3( 0.0f, 0.0f, 0.0f ); for( int mode = 0; mode < numModes; mode++ ) { float4 m = modes[mode * numAtoms + atom]; float weight = weightBuffer[mode]; v.x += m.x * weight; v.y += m.y * weight; v.z += m.z * weight; } v.x *= scale; v.y *= scale; v.z *= scale; velm[atom] = make_float4( v.x, v.y, v.z, invMass ); float4 pos = posq[atom]; /* Add Step */ pos.x += dt * v.x; pos.y += dt * v.y; pos.z += dt * v.z; #ifdef FAST_NOISE /* Remove Noise */ pos.x -= noiseVal[atom].x; pos.y -= noiseVal[atom].y; pos.z -= noiseVal[atom].z; #endif posq[atom] = pos; } }
998d90e38892266ecb1800553be8e6ec52df05ed.cu
extern "C" __global__ void kNMLUpdate1_kernel( int numAtoms, int paddedNumAtoms, float tau, float dt, float kT, float4 *posq, float4 *posqP, float4 *velm, long long *force, const float4 *__restrict__ random, unsigned int randomIndex ) { /* Update the velocity.*/ const float vscale = exp( -dt / tau ); const float fscale = ( 1.0f - vscale ) * tau; const float noisescale = sqrt( kT * ( 1 - vscale * vscale ) ); for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) { const float4 n = random[randomIndex + blockIdx.x * blockDim.x + threadIdx.x]; const float4 randomNoise = make_float4( n.x * noisescale, n.y * noisescale, n.z * noisescale, n.w * noisescale ); const float sqrtInvMass = sqrt( velm[atom].w ); float4 v = velm[atom]; float fx = ( float )force[atom] / ( float )0x100000000; float fy = ( float )force[atom + 1 * paddedNumAtoms] / ( float )0x100000000; float fz = ( float )force[atom + 2 * paddedNumAtoms] / ( float )0x100000000; v.x = ( vscale * v.x ) + ( fscale * fx * v.w ) + ( randomNoise.x * sqrtInvMass ); v.y = ( vscale * v.y ) + ( fscale * fy * v.w ) + ( randomNoise.y * sqrtInvMass ); v.z = ( vscale * v.z ) + ( fscale * fz * v.w ) + ( randomNoise.z * sqrtInvMass ); velm[atom] = v; } } extern "C" __global__ void kNMLUpdate2_kernel( int numAtoms, int numModes, float4 *velm, float4 *modes, float *modeWeights ) { extern __shared__ float dotBuffer[]; for( int mode = blockIdx.x; mode < numModes; mode += gridDim.x ) { /* Compute the projection of the mass weighted velocity onto one normal mode vector. */ float dot = 0.0f; for( int atom = threadIdx.x; atom < numAtoms; atom += blockDim.x ) { const int modePos = mode * numAtoms + atom; const float scale = 1.0f / sqrt( velm[atom].w ); float4 v = velm[atom]; float4 m = modes[modePos]; dot += scale * ( v.x * m.x + v.y * m.y + v.z * m.z ); } dotBuffer[threadIdx.x] = dot; __syncthreads(); if( threadIdx.x == 0 ) { float sum = 0; for( int i = 0; i < blockDim.x; i++ ) { sum += dotBuffer[i]; } modeWeights[mode] = sum; } } } extern "C" __global__ void kNMLUpdate3_kernel( int numAtoms, int numModes, float dt, float4 *posq, float4 *velm, float4 *modes, float *modeWeights, float4 *noiseVal ) { /* Load the weights into shared memory. */ extern __shared__ float weightBuffer[]; for( int mode = threadIdx.x; mode < numModes; mode += blockDim.x ) { weightBuffer[mode] = modeWeights[mode]; } __syncthreads(); /* Compute the projected velocities and update the atom positions. */ for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) { const float invMass = velm[atom].w, scale = sqrt( invMass ); float3 v = make_float3( 0.0f, 0.0f, 0.0f ); for( int mode = 0; mode < numModes; mode++ ) { float4 m = modes[mode * numAtoms + atom]; float weight = weightBuffer[mode]; v.x += m.x * weight; v.y += m.y * weight; v.z += m.z * weight; } v.x *= scale; v.y *= scale; v.z *= scale; velm[atom] = make_float4( v.x, v.y, v.z, invMass ); float4 pos = posq[atom]; /* Add Step */ pos.x += dt * v.x; pos.y += dt * v.y; pos.z += dt * v.z; #ifdef FAST_NOISE /* Remove Noise */ pos.x -= noiseVal[atom].x; pos.y -= noiseVal[atom].y; pos.z -= noiseVal[atom].z; #endif posq[atom] = pos; } }
a89d0d71259a2f3be6582d7c77b3558820e1d895.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; FILE *fp; fp = fopen("C:\\Users\\Calin\\Documents\\AAA CalPoly\\grad\\515\\bw_sid.txt", "r"); printf("HERE\n"); int *img = (int *)malloc(sizeof(int) * 1334 * 750); for (int i = 0; i < (1334 * 750); i++) { fscanf(fp, "%d", &img[i]); } int idx; for (int row = 0; row < (1334); row++) { for (int col = 0; col < 750; col++) { idx = (750 * row) + col; fprintf(stderr, "%d ", img[idx]); } printf("\n"); } free(img); while (1) {} // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
a89d0d71259a2f3be6582d7c77b3558820e1d895.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; FILE *fp; fp = fopen("C:\\Users\\Calin\\Documents\\AAA CalPoly\\grad\\515\\bw_sid.txt", "r"); printf("HERE\n"); int *img = (int *)malloc(sizeof(int) * 1334 * 750); for (int i = 0; i < (1334 * 750); i++) { fscanf(fp, "%d", &img[i]); } int idx; for (int row = 0; row < (1334); row++) { for (int col = 0; col < 750; col++) { idx = (750 * row) + col; fprintf(stderr, "%d ", img[idx]); } printf("\n"); } free(img); while (1) {} // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
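The pair above illustrates the main mechanical changes hipify applies to host code: cuda* runtime calls, error types, and enums are renamed to their hip* counterparts, and the <<<grid, block>>> launch syntax becomes hipLaunchKernelGGL with explicit grid, block, shared-memory, and stream arguments. A minimal standalone sketch of the hipified form (not taken from the dataset), compilable with hipcc:

#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void addKernel(int *c, const int *a, const int *b) {
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

int main() {
    const int n = 5;
    const int a[n] = {1, 2, 3, 4, 5}, b[n] = {10, 20, 30, 40, 50};
    int c[n] = {0};
    int *dev_a = nullptr, *dev_b = nullptr, *dev_c = nullptr;
    hipMalloc((void**)&dev_a, n * sizeof(int));
    hipMalloc((void**)&dev_b, n * sizeof(int));
    hipMalloc((void**)&dev_c, n * sizeof(int));
    hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, n * sizeof(int), hipMemcpyHostToDevice);
    // CUDA source:     addKernel<<<1, n>>>(dev_c, dev_a, dev_b);
    // hipified launch: grid, block, shared-memory bytes, and stream made explicit.
    hipLaunchKernelGGL(addKernel, dim3(1), dim3(n), 0, 0, dev_c, dev_a, dev_b);
    hipDeviceSynchronize();
    hipMemcpy(c, dev_c, n * sizeof(int), hipMemcpyDeviceToHost);
    printf("{%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]);
    hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
    return 0;
}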
24b6079ef6ba96d7a07c9fb43f53b100dae8d724.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <ctime> #include <cstring> // memset #include <cstdlib> // rand, RAND_MAX #include <cmath> // sqrtf #include <string> #include <vector> using namespace std; float randomf(){ return (rand()+0.5)/(RAND_MAX+1.0); } static double get_time(){ timespec tp; clock_gettime(CLOCK_MONOTONIC,&tp); return tp.tv_sec+tp.tv_nsec*1e-9; } // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample) __global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { int batch_index = blockIdx.x; xyz1 += n*3*batch_index; xyz2 += m*3*batch_index; idx += m*nsample*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius[0]) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; idx += m*nsample*batch_index; grad_out += m*nsample*c*batch_index; grad_points += n*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { // Use atomic add to avoid race condition atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]); } } } } int main() { int b=32,n=512,m=128,nsample=64,c=64; float radius=0.1; float *xyz1, *xyz2, *points; hipMallocManaged(&xyz1, b*n*3*sizeof(float)); hipMallocManaged(&xyz2, b*m*3*sizeof(float)); hipMallocManaged(&points, b*n*c*sizeof(float)); int *idx; hipMallocManaged(&idx, b*m*nsample*sizeof(int)); memset(idx, 0, sizeof(int)*b*m*nsample); float *out, *grad_out; hipMallocManaged(&out, b*m*nsample*c*sizeof(float)); hipMallocManaged(&grad_out, b*m*nsample*c*sizeof(float)); memset(grad_out, 0.0, sizeof(float)*b*m*nsample*c); float *grad_points; hipMallocManaged(&grad_points, b*n*c*sizeof(float)); for (int i=0;i<b*n*3;i++) xyz1[i]=randomf(); for (int i=0;i<b*m*3;i++) xyz2[i]=randomf(); for (int i=0;i<b*n*c;i++) points[i]=randomf(); double t0=get_time(); hipLaunchKernelGGL(( query_ball_point_gpu), dim3(b),dim3(256), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx); hipDeviceSynchronize(); printf("query_ball_point gpu time %f\n",get_time()-t0); t0=get_time(); 
hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,points,idx,out); hipDeviceSynchronize(); printf("grou_point gpu time %f\n",get_time()-t0); t0=get_time(); hipLaunchKernelGGL(( group_point_grad_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points); hipDeviceSynchronize(); printf("grou_point_grad gpu time %f\n",get_time()-t0); hipFree(xyz1); hipFree(xyz2); hipFree(points); hipFree(idx); hipFree(out); hipFree(grad_out); hipFree(grad_points); return 0; }
24b6079ef6ba96d7a07c9fb43f53b100dae8d724.cu
#include <cstdio> #include <ctime> #include <cstring> // memset #include <cstdlib> // rand, RAND_MAX #include <cmath> // sqrtf #include <string> #include <vector> using namespace std; float randomf(){ return (rand()+0.5)/(RAND_MAX+1.0); } static double get_time(){ timespec tp; clock_gettime(CLOCK_MONOTONIC,&tp); return tp.tv_sec+tp.tv_nsec*1e-9; } // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample) __global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { int batch_index = blockIdx.x; xyz1 += n*3*batch_index; xyz2 += m*3*batch_index; idx += m*nsample*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius[0]) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; idx += m*nsample*batch_index; grad_out += m*nsample*c*batch_index; grad_points += n*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { // Use atomic add to avoid race condition atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]); } } } } int main() { int b=32,n=512,m=128,nsample=64,c=64; float radius=0.1; float *xyz1, *xyz2, *points; cudaMallocManaged(&xyz1, b*n*3*sizeof(float)); cudaMallocManaged(&xyz2, b*m*3*sizeof(float)); cudaMallocManaged(&points, b*n*c*sizeof(float)); int *idx; cudaMallocManaged(&idx, b*m*nsample*sizeof(int)); memset(idx, 0, sizeof(int)*b*m*nsample); float *out, *grad_out; cudaMallocManaged(&out, b*m*nsample*c*sizeof(float)); cudaMallocManaged(&grad_out, b*m*nsample*c*sizeof(float)); memset(grad_out, 0.0, sizeof(float)*b*m*nsample*c); float *grad_points; cudaMallocManaged(&grad_points, b*n*c*sizeof(float)); for (int i=0;i<b*n*3;i++) xyz1[i]=randomf(); for (int i=0;i<b*m*3;i++) xyz2[i]=randomf(); for (int i=0;i<b*n*c;i++) points[i]=randomf(); double t0=get_time(); query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx); cudaDeviceSynchronize(); printf("query_ball_point gpu time %f\n",get_time()-t0); t0=get_time(); group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out); cudaDeviceSynchronize(); printf("grou_point gpu time 
%f\n",get_time()-t0); t0=get_time(); group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points); cudaDeviceSynchronize(); printf("grou_point_grad gpu time %f\n",get_time()-t0); cudaFree(xyz1); cudaFree(xyz2); cudaFree(points); cudaFree(idx); cudaFree(out); cudaFree(grad_out); cudaFree(grad_points); return 0; }
ff365dd3b741e184ef1fe0907aa774de8efd2870.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file multi_proposal.cu * \brief MultiProposal Operator * \author Shaoqing Ren, Xizhou Zhu, Jian Guo */ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <map> #include <vector> #include <string> #include <utility> #include <ctime> #include <iostream> #include "../operator_common.h" #include "../mshadow_op.h" #include "./multi_proposal-inl.h" #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define FRCNN_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { namespace multi_proposal { // scores are (b, 2 * anchor, h, w) // workspace_proposals are (b, h * w * anchor, 5) // w defines "x" and h defines "y" // count should be total anchors numbers, h * w * anchors template<typename Dtype> __global__ void ProposalGridKernel(const int count, const int num_anchors, const int height, const int width, const int feature_stride, const Dtype* scores, Dtype* workspace_proposals) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int a = index % num_anchors; int w = (index / num_anchors) % width; int h = (index / num_anchors / width) % height; int b = index / num_anchors / width / height; workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride; workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride; workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride; workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride; workspace_proposals[index * 5 + 4] = scores[((b * (2 * num_anchors) + a + num_anchors) * height + h) * width + w]; } } // boxes are (b, h * w * anchor, 5) // deltas are (b, 4 * anchor, h, w) // out_pred_boxes are (b, h * w * anchor, 5) // count should be total anchors numbers, b * h * w * anchors // in-place write: boxes and out_pred_boxes are the same location template<typename Dtype> __global__ void BBoxPredKernel(const int count, const int num_anchors, const int feat_height, const int feat_width, const int feature_stride, const Dtype* im_infos, const Dtype* boxes, const Dtype* deltas, Dtype* out_pred_boxes) { for (int index = blockIdx.x * 
blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int a = index % num_anchors; int w = (index / num_anchors) % feat_width; int h = (index / num_anchors / feat_width) % feat_height; int b = index / num_anchors / feat_width / feat_height; float im_height = im_infos[b * 3]; float im_width = im_infos[b * 3 + 1]; int real_height = static_cast<int>(im_height / feature_stride); int real_width = static_cast<int>(im_width / feature_stride); float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f; float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f; float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f); float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f); int ba = (b * num_anchors + a); float dx = deltas[((ba * 4) * feat_height + h) * feat_width + w]; float dy = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]; float dw = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]; float dh = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]; float pred_ctr_x = dx * width + ctr_x; float pred_ctr_y = dy * height + ctr_y; float pred_w = exp(dw) * width; float pred_h = exp(dh) * height; float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f); float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f); float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f); float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f); pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f); pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f); pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f); pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f); out_pred_boxes[index * 5 + 0] = pred_x1; out_pred_boxes[index * 5 + 1] = pred_y1; out_pred_boxes[index * 5 + 2] = pred_x2; out_pred_boxes[index * 5 + 3] = pred_y2; if (h >= real_height || w >= real_width) { out_pred_boxes[index * 5 + 4] = -1.0f; } } } // boxes are (b, h * w * anchor, 5) // deltas are (b, 4 * anchor, h, w) // out_pred_boxes are (b, h * w * anchor, 5) // count should be total anchors numbers, b * h * w * anchors // in-place write: boxes and out_pred_boxes are the same location template<typename Dtype> __global__ void IoUPredKernel(const int count, const int num_anchors, const int feat_height, const int feat_width, const int feature_stride, const Dtype* im_infos, const Dtype* boxes, const Dtype* deltas, Dtype* out_pred_boxes) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int a = index % num_anchors; int w = (index / num_anchors) % feat_width; int h = (index / num_anchors / feat_width) % feat_height; int b = index / num_anchors / feat_width / feat_height; float im_height = im_infos[b * 3]; float im_width = im_infos[b * 3 + 1]; int real_height = static_cast<int>(im_height / feature_stride); int real_width = static_cast<int>(im_width / feature_stride); float x1 = boxes[index * 5 + 0]; float y1 = boxes[index * 5 + 1]; float x2 = boxes[index * 5 + 2]; float y2 = boxes[index * 5 + 3]; int ba = (b * num_anchors + a); float dx1 = deltas[((ba * 4) * feat_height + h) * feat_width + w]; float dy1 = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]; float dx2 = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]; float dy2 = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]; float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f); float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f); float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f); float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f); out_pred_boxes[index * 5 + 0] = 
pred_x1; out_pred_boxes[index * 5 + 1] = pred_y1; out_pred_boxes[index * 5 + 2] = pred_x2; out_pred_boxes[index * 5 + 3] = pred_y2; if (h >= real_height || w >= real_width) { out_pred_boxes[index * 5 + 4] = -1.0f; } } } // filter box with stride less than rpn_min_size // filter: set score to zero // dets (b, n, 5) template<typename Dtype> __global__ void FilterBoxKernel(const int count, const int count_anchors, const float original_min_size, const Dtype* im_infos, Dtype* dets) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int b = index / count_anchors; float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f; float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f; float min_size = original_min_size * im_infos[b * 3 + 2]; if (iw < min_size || ih < min_size) { dets[index * 5 + 0] -= min_size / 2; dets[index * 5 + 1] -= min_size / 2; dets[index * 5 + 2] += min_size / 2; dets[index * 5 + 3] += min_size / 2; dets[index * 5 + 4] = -1.0f; } } } // copy score and init order // dets (n, 5); score (n, ); order (n, ) // count should be n (total anchors or proposals) template<typename Dtype> __global__ void CopyScoreKernel(const int count, const Dtype* dets, Dtype* score, int* order) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { score[index] = dets[index * 5 + 4]; order[index] = index; } } // reorder proposals according to order and keep the top_n proposals // prev_dets (n, 5); order (n, ); dets (n, 5) // count should be output anchor numbers (top_n) template<typename Dtype> __global__ void ReorderProposalsKernel(const int count, const Dtype* prev_dets, const int* order, Dtype* dets) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { const int order_i = order[index]; for (int j = 0; j < 5; j ++) { dets[index * 5 + j] = prev_dets[order_i * 5 + j]; } } } __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask) { const int threadsPerBlock = sizeof(uint64_t) * 8; const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box 
= dev_boxes + cur_box_idx * 5; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _nms(const mshadow::Tensor<gpu, 2>& boxes, const float nms_overlap_thresh, const int rpn_post_nms_top_n, int *keep, int *num_out) { const int threadsPerBlock = sizeof(uint64_t) * 8; const int boxes_num = boxes.size(0); const int boxes_dim = boxes.size(1); float* boxes_dev = boxes.dptr_; uint64_t* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); FRCNN_CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(uint64_t))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); FRCNN_CUDA_CHECK(hipPeekAtLastError()); std::vector<uint64_t> mask_host(boxes_num * col_blocks); FRCNN_CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(uint64_t) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep[num_to_keep++] = i; if (num_to_keep >= rpn_post_nms_top_n) break; uint64_t *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; FRCNN_CUDA_CHECK(hipFree(mask_dev)); } // copy proposals to output // dets (top_n, 5); keep (top_n, ); out (top_n, ) // count should be top_n (total anchors or proposals) template<typename Dtype> __global__ void PrepareOutput(const int count, const Dtype* dets, const int* keep, const int out_size, const int image_index, Dtype* out, Dtype* score) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { out[index * 5] = image_index; if (index < out_size) { int keep_i = keep[index]; for (int j = 0; j < 4; ++j) { out[index * 5 + j + 1] = dets[keep_i * 5 + j]; } score[index] = dets[keep_i * 5 + 4]; } else { int keep_i = keep[index % out_size]; for (int j = 0; j < 4; ++j) { out[index * 5 + j + 1] = dets[keep_i * 5 + j]; } score[index] = dets[keep_i * 5 + 4]; } } } } // namespace multi_proposal } // namespace cuda } // namespace mshadow namespace mxnet { namespace op { template<typename xpu> class MultiProposalGPUOp : public Operator{ public: explicit MultiProposalGPUOp(MultiProposalParam param) { this->param_ = param; } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; using namespace mshadow::cuda; using namespace mshadow::cuda::multi_proposal; CHECK_EQ(in_data.size(), 3); CHECK_EQ(out_data.size(), 2); CHECK_GT(req.size(), 1); CHECK_EQ(req[proposal::kOut], kWriteTo); /*CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1) << "Sorry, multiple images each device is not implemented.";*/ Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 4> scores = in_data[proposal::kClsProb].get<xpu, 4, real_t>(s); Tensor<xpu, 4> bbox_deltas = 
in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s); Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s); Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s); Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s); int num_images = scores.size(0); int num_anchors = scores.size(1) / 2; int height = scores.size(2); int width = scores.size(3); int count_anchors = num_anchors * height * width; // count of total anchors int count = num_images * count_anchors; // set to -1 for max int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count_anchors; rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count_anchors); int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n); // Generate first anchors based on base anchor std::vector<float> base_anchor(4); base_anchor[0] = 0.0; base_anchor[1] = 0.0; base_anchor[2] = param_.feature_stride - 1.0; base_anchor[3] = param_.feature_stride - 1.0; CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim()); std::vector<float> anchors; utils::GenerateAnchors(base_anchor, param_.ratios, param_.scales, &anchors); // Copy generated anchors to GPU float* workspace_proposals_ptr = NULL; FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr, sizeof(float) * num_images * count_anchors * 5)); Tensor<xpu, 3> workspace_proposals(workspace_proposals_ptr, Shape3(num_images, count_anchors, 5)); FRCNN_CUDA_CHECK(hipMemcpy(workspace_proposals.dptr_, &anchors[0], sizeof(float) * anchors.size(), hipMemcpyHostToDevice)); // Copy proposals to a mesh grid dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid"); hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count, num_anchors, height, width, param_.feature_stride, scores.dptr_, workspace_proposals.dptr_); FRCNN_CUDA_CHECK(hipPeekAtLastError()); // Transform anchors and bbox_deltas into bboxes CheckLaunchParam(dimGrid, dimBlock, "BBoxPred"); if (param_.iou_loss) { hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count, num_anchors, height, width, param_.feature_stride, im_info.dptr_, workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_); } else { hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count, num_anchors, height, width, param_.feature_stride, im_info.dptr_, workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_); } FRCNN_CUDA_CHECK(hipPeekAtLastError()); // filter boxes with less than rpn_min_size CheckLaunchParam(dimGrid, dimBlock, "FilterBox"); hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count, count_anchors, param_.rpn_min_size, im_info.dptr_, workspace_proposals.dptr_); FRCNN_CUDA_CHECK(hipPeekAtLastError()); dimGrid = dim3((count_anchors + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock); dimBlock = dim3(kMaxThreadsPerBlock); // Copy score to a continuous memory float* score_ptr = NULL; FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(float) * count_anchors)); Tensor<xpu, 1> score(score_ptr, Shape1(count_anchors)); int* order_ptr = NULL; FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count_anchors)); Tensor<xpu, 1, int> order(order_ptr, Shape1(count_anchors)); float* workspace_ordered_proposals_ptr = NULL; FRCNN_CUDA_CHECK(hipMalloc(&workspace_ordered_proposals_ptr, sizeof(float) * rpn_pre_nms_top_n * 5)); Tensor<xpu, 2> 
workspace_ordered_proposals(workspace_ordered_proposals_ptr, Shape2(rpn_pre_nms_top_n, 5)); int* keep; FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * rpn_pre_nms_top_n)); for (int b = 0; b < num_images; b++) { CheckLaunchParam(dimGrid, dimBlock, "CopyScore"); CopyScoreKernel << <dimGrid, dimBlock >> >( count_anchors, workspace_proposals.dptr_ + b * count_anchors * 5, score.dptr_, order.dptr_); FRCNN_CUDA_CHECK(hipPeekAtLastError()); // argsort score, save order thrust::stable_sort_by_key(thrust::device, score.dptr_, score.dptr_ + score.size(0), order.dptr_, thrust::greater<real_t>()); FRCNN_CUDA_CHECK(hipPeekAtLastError()); // Reorder proposals according to order dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals"); ReorderProposalsKernel << <dimGrid, dimBlock >> >( rpn_pre_nms_top_n, workspace_proposals.dptr_ + b * count_anchors * 5, order.dptr_, workspace_ordered_proposals.dptr_); FRCNN_CUDA_CHECK(hipPeekAtLastError()); // perform nms std::vector<int> _keep(workspace_ordered_proposals.size(0)); int out_size = 0; _nms(workspace_ordered_proposals, param_.threshold, rpn_post_nms_top_n, &_keep[0], &out_size); // copy nms result to gpu FRCNN_CUDA_CHECK(hipMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(), hipMemcpyHostToDevice)); // copy results after nms dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput"); PrepareOutput << <dimGrid, dimBlock >> >( param_.rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size, b, out.dptr_ + b * param_.rpn_post_nms_top_n * 5, out_score.dptr_ + b * param_.rpn_post_nms_top_n); FRCNN_CUDA_CHECK(hipPeekAtLastError()); } // free temporary memory FRCNN_CUDA_CHECK(hipFree(keep)); FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr)); FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr)); FRCNN_CUDA_CHECK(hipFree(score_ptr)); FRCNN_CUDA_CHECK(hipFree(order_ptr)); } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 3); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s); Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s); Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s); // can not assume the grad would be zero Assign(gscores, req[proposal::kClsProb], 0); Assign(gbbox, req[proposal::kBBoxPred], 0); Assign(ginfo, req[proposal::kImInfo], 0); } private: MultiProposalParam param_; }; // class MultiProposalGPUOp template<> Operator* CreateOp<gpu>(MultiProposalParam param) { return new MultiProposalGPUOp<gpu>(param); } } // namespace op } // namespace mxnet
ff365dd3b741e184ef1fe0907aa774de8efd2870.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file multi_proposal.cu * \brief MultiProposal Operator * \author Shaoqing Ren, Xizhou Zhu, Jian Guo */ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <map> #include <vector> #include <string> #include <utility> #include <ctime> #include <iostream> #include "../operator_common.h" #include "../mshadow_op.h" #include "./multi_proposal-inl.h" #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define FRCNN_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { namespace multi_proposal { // scores are (b, 2 * anchor, h, w) // workspace_proposals are (b, h * w * anchor, 5) // w defines "x" and h defines "y" // count should be total anchors numbers, h * w * anchors template<typename Dtype> __global__ void ProposalGridKernel(const int count, const int num_anchors, const int height, const int width, const int feature_stride, const Dtype* scores, Dtype* workspace_proposals) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int a = index % num_anchors; int w = (index / num_anchors) % width; int h = (index / num_anchors / width) % height; int b = index / num_anchors / width / height; workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride; workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride; workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride; workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride; workspace_proposals[index * 5 + 4] = scores[((b * (2 * num_anchors) + a + num_anchors) * height + h) * width + w]; } } // boxes are (b, h * w * anchor, 5) // deltas are (b, 4 * anchor, h, w) // out_pred_boxes are (b, h * w * anchor, 5) // count should be total anchors numbers, b * h * w * anchors // in-place write: boxes and out_pred_boxes are the same location template<typename Dtype> __global__ void BBoxPredKernel(const int count, const int num_anchors, const int feat_height, const int feat_width, const int feature_stride, const Dtype* im_infos, const Dtype* boxes, const Dtype* deltas, Dtype* out_pred_boxes) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int a = 
index % num_anchors; int w = (index / num_anchors) % feat_width; int h = (index / num_anchors / feat_width) % feat_height; int b = index / num_anchors / feat_width / feat_height; float im_height = im_infos[b * 3]; float im_width = im_infos[b * 3 + 1]; int real_height = static_cast<int>(im_height / feature_stride); int real_width = static_cast<int>(im_width / feature_stride); float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f; float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f; float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f); float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f); int ba = (b * num_anchors + a); float dx = deltas[((ba * 4) * feat_height + h) * feat_width + w]; float dy = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]; float dw = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]; float dh = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]; float pred_ctr_x = dx * width + ctr_x; float pred_ctr_y = dy * height + ctr_y; float pred_w = exp(dw) * width; float pred_h = exp(dh) * height; float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f); float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f); float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f); float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f); pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f); pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f); pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f); pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f); out_pred_boxes[index * 5 + 0] = pred_x1; out_pred_boxes[index * 5 + 1] = pred_y1; out_pred_boxes[index * 5 + 2] = pred_x2; out_pred_boxes[index * 5 + 3] = pred_y2; if (h >= real_height || w >= real_width) { out_pred_boxes[index * 5 + 4] = -1.0f; } } } // boxes are (b, h * w * anchor, 5) // deltas are (b, 4 * anchor, h, w) // out_pred_boxes are (b, h * w * anchor, 5) // count should be total anchors numbers, b * h * w * anchors // in-place write: boxes and out_pred_boxes are the same location template<typename Dtype> __global__ void IoUPredKernel(const int count, const int num_anchors, const int feat_height, const int feat_width, const int feature_stride, const Dtype* im_infos, const Dtype* boxes, const Dtype* deltas, Dtype* out_pred_boxes) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int a = index % num_anchors; int w = (index / num_anchors) % feat_width; int h = (index / num_anchors / feat_width) % feat_height; int b = index / num_anchors / feat_width / feat_height; float im_height = im_infos[b * 3]; float im_width = im_infos[b * 3 + 1]; int real_height = static_cast<int>(im_height / feature_stride); int real_width = static_cast<int>(im_width / feature_stride); float x1 = boxes[index * 5 + 0]; float y1 = boxes[index * 5 + 1]; float x2 = boxes[index * 5 + 2]; float y2 = boxes[index * 5 + 3]; int ba = (b * num_anchors + a); float dx1 = deltas[((ba * 4) * feat_height + h) * feat_width + w]; float dy1 = deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]; float dx2 = deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]; float dy2 = deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]; float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f); float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f); float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f); float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f); out_pred_boxes[index * 5 + 0] = pred_x1; out_pred_boxes[index * 5 + 1] = pred_y1; out_pred_boxes[index * 5 + 2] = pred_x2; 
out_pred_boxes[index * 5 + 3] = pred_y2; if (h >= real_height || w >= real_width) { out_pred_boxes[index * 5 + 4] = -1.0f; } } } // filter box with stride less than rpn_min_size // filter: set score to zero // dets (b, n, 5) template<typename Dtype> __global__ void FilterBoxKernel(const int count, const int count_anchors, const float original_min_size, const Dtype* im_infos, Dtype* dets) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { int b = index / count_anchors; float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f; float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f; float min_size = original_min_size * im_infos[b * 3 + 2]; if (iw < min_size || ih < min_size) { dets[index * 5 + 0] -= min_size / 2; dets[index * 5 + 1] -= min_size / 2; dets[index * 5 + 2] += min_size / 2; dets[index * 5 + 3] += min_size / 2; dets[index * 5 + 4] = -1.0f; } } } // copy score and init order // dets (n, 5); score (n, ); order (n, ) // count should be n (total anchors or proposals) template<typename Dtype> __global__ void CopyScoreKernel(const int count, const Dtype* dets, Dtype* score, int* order) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { score[index] = dets[index * 5 + 4]; order[index] = index; } } // reorder proposals according to order and keep the top_n proposals // prev_dets (n, 5); order (n, ); dets (n, 5) // count should be output anchor numbers (top_n) template<typename Dtype> __global__ void ReorderProposalsKernel(const int count, const Dtype* prev_dets, const int* order, Dtype* dets) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { const int order_i = order[index]; for (int j = 0; j < 5; j ++) { dets[index * 5 + j] = prev_dets[order_i * 5 + j]; } } } __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask) { const int threadsPerBlock = sizeof(uint64_t) * 8; const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; uint64_t t = 0; int start = 0; if (row_start == 
col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _nms(const mshadow::Tensor<gpu, 2>& boxes, const float nms_overlap_thresh, const int rpn_post_nms_top_n, int *keep, int *num_out) { const int threadsPerBlock = sizeof(uint64_t) * 8; const int boxes_num = boxes.size(0); const int boxes_dim = boxes.size(1); float* boxes_dev = boxes.dptr_; uint64_t* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); FRCNN_CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(uint64_t))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); std::vector<uint64_t> mask_host(boxes_num * col_blocks); FRCNN_CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(uint64_t) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep[num_to_keep++] = i; if (num_to_keep >= rpn_post_nms_top_n) break; uint64_t *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; FRCNN_CUDA_CHECK(cudaFree(mask_dev)); } // copy proposals to output // dets (top_n, 5); keep (top_n, ); out (top_n, ) // count should be top_n (total anchors or proposals) template<typename Dtype> __global__ void PrepareOutput(const int count, const Dtype* dets, const int* keep, const int out_size, const int image_index, Dtype* out, Dtype* score) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { out[index * 5] = image_index; if (index < out_size) { int keep_i = keep[index]; for (int j = 0; j < 4; ++j) { out[index * 5 + j + 1] = dets[keep_i * 5 + j]; } score[index] = dets[keep_i * 5 + 4]; } else { int keep_i = keep[index % out_size]; for (int j = 0; j < 4; ++j) { out[index * 5 + j + 1] = dets[keep_i * 5 + j]; } score[index] = dets[keep_i * 5 + 4]; } } } } // namespace multi_proposal } // namespace cuda } // namespace mshadow namespace mxnet { namespace op { template<typename xpu> class MultiProposalGPUOp : public Operator{ public: explicit MultiProposalGPUOp(MultiProposalParam param) { this->param_ = param; } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; using namespace mshadow::cuda; using namespace mshadow::cuda::multi_proposal; CHECK_EQ(in_data.size(), 3); CHECK_EQ(out_data.size(), 2); CHECK_GT(req.size(), 1); CHECK_EQ(req[proposal::kOut], kWriteTo); /*CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1) << "Sorry, multiple images each device is not implemented.";*/ Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 4> scores = in_data[proposal::kClsProb].get<xpu, 4, real_t>(s); Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s); Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s); 
Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s); Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s); int num_images = scores.size(0); int num_anchors = scores.size(1) / 2; int height = scores.size(2); int width = scores.size(3); int count_anchors = num_anchors * height * width; // count of total anchors int count = num_images * count_anchors; // set to -1 for max int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count_anchors; rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count_anchors); int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n); // Generate first anchors based on base anchor std::vector<float> base_anchor(4); base_anchor[0] = 0.0; base_anchor[1] = 0.0; base_anchor[2] = param_.feature_stride - 1.0; base_anchor[3] = param_.feature_stride - 1.0; CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim()); std::vector<float> anchors; utils::GenerateAnchors(base_anchor, param_.ratios, param_.scales, &anchors); // Copy generated anchors to GPU float* workspace_proposals_ptr = NULL; FRCNN_CUDA_CHECK(cudaMalloc(&workspace_proposals_ptr, sizeof(float) * num_images * count_anchors * 5)); Tensor<xpu, 3> workspace_proposals(workspace_proposals_ptr, Shape3(num_images, count_anchors, 5)); FRCNN_CUDA_CHECK(cudaMemcpy(workspace_proposals.dptr_, &anchors[0], sizeof(float) * anchors.size(), cudaMemcpyHostToDevice)); // Copy proposals to a mesh grid dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid"); ProposalGridKernel<<<dimGrid, dimBlock>>>( count, num_anchors, height, width, param_.feature_stride, scores.dptr_, workspace_proposals.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // Transform anchors and bbox_deltas into bboxes CheckLaunchParam(dimGrid, dimBlock, "BBoxPred"); if (param_.iou_loss) { IoUPredKernel<<<dimGrid, dimBlock>>>( count, num_anchors, height, width, param_.feature_stride, im_info.dptr_, workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_); } else { BBoxPredKernel<<<dimGrid, dimBlock>>>( count, num_anchors, height, width, param_.feature_stride, im_info.dptr_, workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_); } FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // filter boxes with less than rpn_min_size CheckLaunchParam(dimGrid, dimBlock, "FilterBox"); FilterBoxKernel<<<dimGrid, dimBlock>>>( count, count_anchors, param_.rpn_min_size, im_info.dptr_, workspace_proposals.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); dimGrid = dim3((count_anchors + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock); dimBlock = dim3(kMaxThreadsPerBlock); // Copy score to a continuous memory float* score_ptr = NULL; FRCNN_CUDA_CHECK(cudaMalloc(&score_ptr, sizeof(float) * count_anchors)); Tensor<xpu, 1> score(score_ptr, Shape1(count_anchors)); int* order_ptr = NULL; FRCNN_CUDA_CHECK(cudaMalloc(&order_ptr, sizeof(int) * count_anchors)); Tensor<xpu, 1, int> order(order_ptr, Shape1(count_anchors)); float* workspace_ordered_proposals_ptr = NULL; FRCNN_CUDA_CHECK(cudaMalloc(&workspace_ordered_proposals_ptr, sizeof(float) * rpn_pre_nms_top_n * 5)); Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr, Shape2(rpn_pre_nms_top_n, 5)); int* keep; FRCNN_CUDA_CHECK(cudaMalloc(&keep, sizeof(int) * rpn_pre_nms_top_n)); for (int b = 0; b < num_images; b++) { CheckLaunchParam(dimGrid, dimBlock, "CopyScore"); CopyScoreKernel << <dimGrid, 
dimBlock >> >( count_anchors, workspace_proposals.dptr_ + b * count_anchors * 5, score.dptr_, order.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // argsort score, save order thrust::stable_sort_by_key(thrust::device, score.dptr_, score.dptr_ + score.size(0), order.dptr_, thrust::greater<real_t>()); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // Reorder proposals according to order dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals"); ReorderProposalsKernel << <dimGrid, dimBlock >> >( rpn_pre_nms_top_n, workspace_proposals.dptr_ + b * count_anchors * 5, order.dptr_, workspace_ordered_proposals.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // perform nms std::vector<int> _keep(workspace_ordered_proposals.size(0)); int out_size = 0; _nms(workspace_ordered_proposals, param_.threshold, rpn_post_nms_top_n, &_keep[0], &out_size); // copy nms result to gpu FRCNN_CUDA_CHECK(cudaMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(), cudaMemcpyHostToDevice)); // copy results after nms dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput"); PrepareOutput << <dimGrid, dimBlock >> >( param_.rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size, b, out.dptr_ + b * param_.rpn_post_nms_top_n * 5, out_score.dptr_ + b * param_.rpn_post_nms_top_n); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); } // free temporary memory FRCNN_CUDA_CHECK(cudaFree(keep)); FRCNN_CUDA_CHECK(cudaFree(workspace_ordered_proposals_ptr)); FRCNN_CUDA_CHECK(cudaFree(workspace_proposals_ptr)); FRCNN_CUDA_CHECK(cudaFree(score_ptr)); FRCNN_CUDA_CHECK(cudaFree(order_ptr)); } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 3); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s); Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s); Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s); // can not assume the grad would be zero Assign(gscores, req[proposal::kClsProb], 0); Assign(gbbox, req[proposal::kBBoxPred], 0); Assign(ginfo, req[proposal::kImInfo], 0); } private: MultiProposalParam param_; }; // class MultiProposalGPUOp template<> Operator* CreateOp<gpu>(MultiProposalParam param) { return new MultiProposalGPUOp<gpu>(param); } } // namespace op } // namespace mxnet
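The nms_kernel above records, for every (row box, column block) pair, a 64-bit mask whose set bits mark the column boxes whose IoU with the row box exceeds the threshold; _nms then walks the score-sorted boxes on the host, OR-ing each kept box's mask row into remv so that already-suppressed boxes are skipped. The sketch below is a minimal host-side reference of the same greedy rule without the bitmask packing; the helper names (iou_cpu, greedy_nms_cpu) are illustrative and not part of the operator.

#include <vector>
#include <algorithm>

// Same IoU formula as devIoU above, evaluated on the host.
static float iou_cpu(const float* a, const float* b) {
  float left = std::max(a[0], b[0]), right  = std::min(a[2], b[2]);
  float top  = std::max(a[1], b[1]), bottom = std::min(a[3], b[3]);
  float w = std::max(right - left + 1.0f, 0.0f);
  float h = std::max(bottom - top + 1.0f, 0.0f);
  float inter = w * h;
  float sa = (a[2] - a[0] + 1.0f) * (a[3] - a[1] + 1.0f);
  float sb = (b[2] - b[0] + 1.0f) * (b[3] - b[1] + 1.0f);
  return inter / (sa + sb - inter);
}

// Greedy NMS over boxes already sorted by descending score, laid out as
// (x1, y1, x2, y2, score) per box, stopping after post_nms_top_n keeps.
static int greedy_nms_cpu(const std::vector<float>& dets, float thresh,
                          int post_nms_top_n, std::vector<int>* keep) {
  int n = static_cast<int>(dets.size() / 5);
  std::vector<char> removed(n, 0);
  for (int i = 0; i < n && static_cast<int>(keep->size()) < post_nms_top_n; ++i) {
    if (removed[i]) continue;
    keep->push_back(i);
    for (int j = i + 1; j < n; ++j) {
      if (!removed[j] && iou_cpu(&dets[i * 5], &dets[j * 5]) > thresh) removed[j] = 1;
    }
  }
  return static_cast<int>(keep->size());
}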
4d0e640e5b37080e917a721c045ceb56807d25bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void reduce(float* a, int N) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id<N) { a[2*id]+=a[2*id+1]; __syncthreads(); } } __global__ void force_flush (float4 *f, int N) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=N) return; f[id].x=0.; f[id].y=0.; f[id].z=0.; } __global__ void rand_init (int seed, hiprandStatePhilox4_32_10_t* states) { int id = blockIdx.x*blockDim.x + threadIdx.x; hiprand_init(seed, id, 0, &states[id]); } __global__ void integrate(float4 *r, float4 *forces, int N, hiprandStatePhilox4_32_10_t* states) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=N) return; float4 f=forces[id]; float4 wn = hiprand_normal4(&states[id]); //Gaussian white noise ~N(0,1) //float4 ri=r[id]; float4 ri=tex1Dfetch(r_t, id); float3 dr; dr.x=bd_c.hoz*f.x+bd_c.Gamma*wn.x; dr.y=bd_c.hoz*f.y+bd_c.Gamma*wn.y; dr.z=bd_c.hoz*f.z+bd_c.Gamma*wn.z; ri.x+=dr.x; ri.y+=dr.y; ri.z+=dr.z; ri.w=dr.x*dr.x+dr.y*dr.y+dr.z*dr.z; //Save velocity squared for calculation of diffusion constant / temperature / kinetic energy r[id]=ri; } __global__ void minimize(float4 *r, float4 *forces, int N, float alpha) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=N) return; float4 f=forces[id]; //float4 ri=r[id]; float4 ri=tex1Dfetch(r_t, id); ri.x+=alpha*f.x; ri.y+=alpha*f.y; ri.z+=alpha*f.z; r[id]=ri; } __global__ void FENEForce(float4* r, float4* forces, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nb=list.count_d[i]; //Number of bonds of the i-th bead for (int ib=0; ib<Nb; ib++) { //Loop over bonds of the i-th bead bond b=list.map_d[ib*list.N+i]; //Look up bond in the map //float4 l=r[b.i2]; //Number of bead on the other end of the bond (i2) and its coordinates (l) float4 l=tex1Dfetch(r_t, b.i2); //(reading from texture cache is faster than directly from r[]) l.x-=ri.x; //Atom-to-bead vector l.y-=ri.y; l.z-=ri.z; l.w=sqrtf(l.x*l.x+l.y*l.y+l.z*l.z); l.w-=b.l0; float denom=(1.-l.w*l.w/fene_c.R02); l.w=fene_c.k*l.w/denom/(l.w+b.l0); f.x+=l.w*l.x; f.y+=l.w*l.y; f.z+=l.w*l.z; } forces[i]=f; } __global__ void FENEEnergy(float4* r, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; int Nb=list.count_d[i]; //Number of bonds of the i-th bead //printf("Atom %d (%f %f %f):\n",i,ri.x,ri.y,ri.z); for (int ib=0; ib<Nb; ib++) { //Loop over bonds of the i-th bead bond b=list.map_d[ib*list.N+i]; //Look up bond in the map //float4 l=r[b.i2]; //Number of bead on the other end of the bond (i2) and its coordinates (l) float4 l=tex1Dfetch(r_t, b.i2); //(reading from texture cache is faster than directly from r[]) l.x-=ri.x; //Atom-to-bead vector l.y-=ri.y; l.z-=ri.z; l.w=sqrtf(l.x*l.x+l.y*l.y+l.z*l.z); //printf("%d %d %f %f %f\n",i,b.i2,energy,b.l0,l.w); l.w-=b.l0; l.w=-.5*fene_c.k*fene_c.R02*logf(1.-l.w*l.w/fene_c.R02); //printf("%f\n",l.w); energy+=l.w; } r[i].w=energy; } __global__ void SoftSphereForce(float4 *r, float4 *forces, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; float4 f=forces[i]; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int 
ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; // sigma of the other bead, and mixed into sigma_ij sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) { // Potential is cut off at rcut=CutOffFactor*sigma => sigma^2/r^2 should be > 1/CutOffFactor2 r2.w*=r2.w; // to the 4th r2.w*=r2.w; // to the 8th r2.w=ss_c.Minus6eps/sigma2*(r2.w+ss_c.CutOffFactor8inv); f.x+=r2.x*r2.w; f.y+=r2.y*r2.w; f.z+=r2.z*r2.w; } } forces[i]=f; } __global__ void SoftSphereEnergy(float4 *r, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) // Potential is cut off at rcut=CutOffFactor*sigma => sigma^2/r^2 should be > 1/CutOffFactor2 //energy+=ss_c.eps*r2.w*r2.w*r2.w; // to the 6th energy+=ss_c.eps*(r2.w*r2.w*r2.w-ss_c.CutOffFactor6inv); } r[i].w=energy; } __global__ void NativeSubtractSoftSphereForce(float4* r, float4* forces, InteractionList<nc> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) { r2.w*=r2.w; // to the 4th r2.w*=r2.w; // to the 8th r2.w=ss_c.Minus6eps/sigma2*(r2.w+ss_c.CutOffFactor8inv); f.x-=r2.x*r2.w; f.y-=r2.y*r2.w; f.z-=r2.z*r2.w; } } forces[i]=f; } __global__ void NativeSubtractSoftSphereForce(float4* r, float4* forces, InteractionList<nc> list, float *sig, float Delta) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) { r2.w*=r2.w; // to the 4th r2.w*=r2.w; // to the 8th r2.w=Delta*ss_c.Minus6eps/sigma2*(r2.w+ss_c.CutOffFactor8inv); f.x-=r2.x*r2.w; f.y-=r2.y*r2.w; 
f.z-=r2.z*r2.w; } } forces[i]=f; } __global__ void NativeSubtractSoftSphereEnergy(float4 *r, InteractionList<nc> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) energy-=ss_c.eps*(r2.w*r2.w*r2.w-ss_c.CutOffFactor6inv); // to the 6th } r[i].w=energy; } __global__ void NativeForce(float4* r, float4* forces, InteractionList<nc> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float4 f=forces[i]; int Nnc=list.count_d[i]; for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,ncij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=ncij.r02/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; r2.w=ncij.factor*r2.w*r6inv*(1-r6inv); f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; } forces[i]=f; } __global__ void NativeForce(float4* r, float4* forces, InteractionList<nc> list, float Delta) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float4 f=forces[i]; int Nnc=list.count_d[i]; for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,ncij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=ncij.r02/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; r2.w=Delta*ncij.factor*r2.w*r6inv*(1-r6inv); f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; } forces[i]=f; } __global__ void NativeEnergy(float4* r, InteractionList<nc> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float energy=0.0f; int Nnc=list.count_d[i]; for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,ncij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=ncij.r02/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; energy+=ncij.epsilon*r6inv*(r6inv-2.0f); } r[i].w=energy; } __global__ void DebyeHuckelForce(float4* r, float4* forces, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float4 f=forces[i]; int Nsb=list.count_d[i]; for (int isb=0; isb<Nsb; isb++) { bond sbij=list.map_d[isb*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,sbij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; float dist2=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; float dist=sqrtf(dist2); //if (dist<1.5*els_c.kappainv) { r2.w=expf(-dist/els_c.kappainv)*sbij.l0/dist2; f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; //} } forces[i]=f; } __global__ void DebyeHuckelEnergy(float4* r, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float energy=0.f; int Nsb=list.count_d[i]; for (int isb=0; isb<Nsb; isb++) { bond sbij=list.map_d[isb*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,sbij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; float 
dist2=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; float dist=sqrtf(dist2); //if (dist<1.5*els_c.kappainv) energy+=expf(-dist/els_c.kappainv)*sbij.l0/dist; } r[i].w=energy; } __global__ void SoftSphereNeighborList(float4* r, InteractionList<int> list, int Ntraj) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if (r2.w>ss_c.Rcut2) continue; if (i==j) continue; bool atchainstart=false; for (int chain=1; chain<chainstarts_c[0]; chain++) { atchainstart+=(max(i,j)==chainstarts_c[chain]); } if ( ( (abs(j-i)>1) //include all-nonneighboring beads or ((max(i,j) % Ntraj)>=Ntraj/2) // if one is sidechain, it interacts with any other side chain or backbone or atchainstart //if one bead is the start of a chain, it interacts with any other bead ) and (abs(j-i)!=Ntraj/2) //exclude bb and sc of the same residue and ((i/Ntraj)==(j/Ntraj)) //exclude beads belonging to different trajectories/replicas ) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void SoftSphereNeighborList(float4* r, InteractionList<int> list, InteractionList<bond> blist, int Ntraj) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; int Nb=blist.count_d[i]; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; //Check that i and j are not bonded by looping over bonds of i bool nonbonded=true; for (int ib=0; ib<Nb; ib++) { bond b=blist.map_d[ib*blist.N+i]; if (b.i2==j) nonbonded=false; } if ((r2.w<ss_c.Rcut2) and (i!=j) and (nonbonded) and (i/Ntraj)==(j/Ntraj)) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void SoftSphereNeighborList(float4* r, InteractionList<int> intlist, InteractionList<int> neiblist) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=intlist.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int Npartners=intlist.count_d[i]; int neighbors=0; for (int ip=0;ip<Npartners;ip++) { int j=intlist.map_d[ip*intlist.N+i]; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if (r2.w<ss_c.Rcut2) { neiblist.map_d[neighbors*neiblist.N+i]=j; neighbors++; } } neiblist.count_d[i]=neighbors; }
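In integrate, each bead is advanced by dr = bd_c.hoz * f + bd_c.Gamma * wn: a deterministic drift proportional to the accumulated force plus Gaussian noise drawn with hiprand_normal4. The constants live in bd_c, which is defined outside this file; the helper below only illustrates how such constants are commonly derived for an overdamped Langevin (Brownian) step — hoz as dt/zeta and Gamma as sqrt(2*kT*dt/zeta) — and is an assumption for illustration, not a definition taken from this code base.

#include <math.h>

// Assumed relationship between the Brownian-dynamics constants consumed by
// integrate(); standard overdamped Langevin form, NOT read from this project.
struct BDConstants {
    float hoz;    // dt / zeta: drift prefactor applied to the force
    float Gamma;  // sqrt(2 kT dt / zeta): amplitude of the N(0,1) noise
};

static BDConstants make_bd_constants(float dt, float zeta, float kT) {
    BDConstants c;
    c.hoz   = dt / zeta;
    c.Gamma = sqrtf(2.0f * kT * dt / zeta);
    return c;
}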
4d0e640e5b37080e917a721c045ceb56807d25bf.cu
__global__ void reduce(float* a, int N) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id<N) { a[2*id]+=a[2*id+1]; __syncthreads(); } } __global__ void force_flush (float4 *f, int N) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=N) return; f[id].x=0.; f[id].y=0.; f[id].z=0.; } __global__ void rand_init (int seed, curandStatePhilox4_32_10_t* states) { int id = blockIdx.x*blockDim.x + threadIdx.x; curand_init(seed, id, 0, &states[id]); } __global__ void integrate(float4 *r, float4 *forces, int N, curandStatePhilox4_32_10_t* states) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=N) return; float4 f=forces[id]; float4 wn = curand_normal4(&states[id]); //Gaussian white noise ~N(0,1) //float4 ri=r[id]; float4 ri=tex1Dfetch(r_t, id); float3 dr; dr.x=bd_c.hoz*f.x+bd_c.Gamma*wn.x; dr.y=bd_c.hoz*f.y+bd_c.Gamma*wn.y; dr.z=bd_c.hoz*f.z+bd_c.Gamma*wn.z; ri.x+=dr.x; ri.y+=dr.y; ri.z+=dr.z; ri.w=dr.x*dr.x+dr.y*dr.y+dr.z*dr.z; //Save velocity squared for calculation of diffusion constant / temperature / kinetic energy r[id]=ri; } __global__ void minimize(float4 *r, float4 *forces, int N, float alpha) { int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=N) return; float4 f=forces[id]; //float4 ri=r[id]; float4 ri=tex1Dfetch(r_t, id); ri.x+=alpha*f.x; ri.y+=alpha*f.y; ri.z+=alpha*f.z; r[id]=ri; } __global__ void FENEForce(float4* r, float4* forces, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nb=list.count_d[i]; //Number of bonds of the i-th bead for (int ib=0; ib<Nb; ib++) { //Loop over bonds of the i-th bead bond b=list.map_d[ib*list.N+i]; //Look up bond in the map //float4 l=r[b.i2]; //Number of bead on the other end of the bond (i2) and its coordinates (l) float4 l=tex1Dfetch(r_t, b.i2); //(reading from texture cache is faster than directly from r[]) l.x-=ri.x; //Atom-to-bead vector l.y-=ri.y; l.z-=ri.z; l.w=sqrtf(l.x*l.x+l.y*l.y+l.z*l.z); l.w-=b.l0; float denom=(1.-l.w*l.w/fene_c.R02); l.w=fene_c.k*l.w/denom/(l.w+b.l0); f.x+=l.w*l.x; f.y+=l.w*l.y; f.z+=l.w*l.z; } forces[i]=f; } __global__ void FENEEnergy(float4* r, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; int Nb=list.count_d[i]; //Number of bonds of the i-th bead //printf("Atom %d (%f %f %f):\n",i,ri.x,ri.y,ri.z); for (int ib=0; ib<Nb; ib++) { //Loop over bonds of the i-th bead bond b=list.map_d[ib*list.N+i]; //Look up bond in the map //float4 l=r[b.i2]; //Number of bead on the other end of the bond (i2) and its coordinates (l) float4 l=tex1Dfetch(r_t, b.i2); //(reading from texture cache is faster than directly from r[]) l.x-=ri.x; //Atom-to-bead vector l.y-=ri.y; l.z-=ri.z; l.w=sqrtf(l.x*l.x+l.y*l.y+l.z*l.z); //printf("%d %d %f %f %f\n",i,b.i2,energy,b.l0,l.w); l.w-=b.l0; l.w=-.5*fene_c.k*fene_c.R02*logf(1.-l.w*l.w/fene_c.R02); //printf("%f\n",l.w); energy+=l.w; } r[i].w=energy; } __global__ void SoftSphereForce(float4 *r, float4 *forces, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; float4 f=forces[i]; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up 
neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; // sigma of the other bead, and mixed into sigma_ij sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) { // Potential is cut off at rcut=CutOffFactor*sigma => sigma^2/r^2 should be > 1/CutOffFactor2 r2.w*=r2.w; // to the 4th r2.w*=r2.w; // to the 8th r2.w=ss_c.Minus6eps/sigma2*(r2.w+ss_c.CutOffFactor8inv); f.x+=r2.x*r2.w; f.y+=r2.y*r2.w; f.z+=r2.z*r2.w; } } forces[i]=f; } __global__ void SoftSphereEnergy(float4 *r, InteractionList<int> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); //Sigma of the i-th bead int Nneib=list.count_d[i]; //Number of neighbors of the i-th bead for (int ineib=0;ineib<Nneib;ineib++) { //Loop over neighbors of the i-th bead int j=list.map_d[ineib*list.N+i]; //Look up neibor in the neibor list //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); //float4 r2=tex1Dfetch(r_t,tex1Dfetch(neibmap_t,ineib*list.N+i); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) // Potential is cut off at rcut=CutOffFactor*sigma => sigma^2/r^2 should be > 1/CutOffFactor2 //energy+=ss_c.eps*r2.w*r2.w*r2.w; // to the 6th energy+=ss_c.eps*(r2.w*r2.w*r2.w-ss_c.CutOffFactor6inv); } r[i].w=energy; } __global__ void NativeSubtractSoftSphereForce(float4* r, float4* forces, InteractionList<nc> list, float *sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) { r2.w*=r2.w; // to the 4th r2.w*=r2.w; // to the 8th r2.w=ss_c.Minus6eps/sigma2*(r2.w+ss_c.CutOffFactor8inv); f.x-=r2.x*r2.w; f.y-=r2.y*r2.w; f.z-=r2.z*r2.w; } } forces[i]=f; } __global__ void NativeSubtractSoftSphereForce(float4* r, float4* forces, InteractionList<nc> list, float *sig, float Delta) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float4 f=forces[i]; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) { r2.w*=r2.w; // to the 4th r2.w*=r2.w; // to the 8th r2.w=Delta*ss_c.Minus6eps/sigma2*(r2.w+ss_c.CutOffFactor8inv); f.x-=r2.x*r2.w; f.y-=r2.y*r2.w; f.z-=r2.z*r2.w; } } forces[i]=f; } __global__ void NativeSubtractSoftSphereEnergy(float4 *r, InteractionList<nc> list, float 
*sig) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t, i); float energy=0.0f; int Nnc=list.count_d[i]; //float sigi=sig[i]; float sigi=tex1Dfetch(sig_t,i); for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; int j=ncij.i2; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; //float sigma2=(sigi+sig[j])/2.; float sigma2=(sigi+tex1Dfetch(sig_t,j))/2.; sigma2*=sigma2; r2.w=sigma2/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); // squared if (r2.w>ss_c.CutOffFactor2inv) energy-=ss_c.eps*(r2.w*r2.w*r2.w-ss_c.CutOffFactor6inv); // to the 6th } r[i].w=energy; } __global__ void NativeForce(float4* r, float4* forces, InteractionList<nc> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float4 f=forces[i]; int Nnc=list.count_d[i]; for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,ncij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=ncij.r02/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; r2.w=ncij.factor*r2.w*r6inv*(1-r6inv); f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; } forces[i]=f; } __global__ void NativeForce(float4* r, float4* forces, InteractionList<nc> list, float Delta) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float4 f=forces[i]; int Nnc=list.count_d[i]; for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,ncij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=ncij.r02/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; r2.w=Delta*ncij.factor*r2.w*r6inv*(1-r6inv); f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; } forces[i]=f; } __global__ void NativeEnergy(float4* r, InteractionList<nc> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float energy=0.0f; int Nnc=list.count_d[i]; for (int inc=0; inc<Nnc; inc++) { nc ncij=list.map_d[inc*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,ncij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=ncij.r02/(r2.x*r2.x+r2.y*r2.y+r2.z*r2.z); float r6inv=r2.w*r2.w*r2.w; energy+=ncij.epsilon*r6inv*(r6inv-2.0f); } r[i].w=energy; } __global__ void DebyeHuckelForce(float4* r, float4* forces, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float4 f=forces[i]; int Nsb=list.count_d[i]; for (int isb=0; isb<Nsb; isb++) { bond sbij=list.map_d[isb*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,sbij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; float dist2=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; float dist=sqrtf(dist2); //if (dist<1.5*els_c.kappainv) { r2.w=expf(-dist/els_c.kappainv)*sbij.l0/dist2; f.x+=r2.w*r2.x; f.y+=r2.w*r2.y; f.z+=r2.w*r2.z; //} } forces[i]=f; } __global__ void DebyeHuckelEnergy(float4* r, InteractionList<bond> list) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); float energy=0.f; int Nsb=list.count_d[i]; for (int isb=0; isb<Nsb; isb++) { bond sbij=list.map_d[isb*list.N+i]; //float4 r2=r[ncij.i2]; float4 r2=tex1Dfetch(r_t,sbij.i2); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; float dist2=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; float dist=sqrtf(dist2); //if (dist<1.5*els_c.kappainv) energy+=expf(-dist/els_c.kappainv)*sbij.l0/dist; } 
r[i].w=energy; } __global__ void SoftSphereNeighborList(float4* r, InteractionList<int> list, int Ntraj) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if (r2.w>ss_c.Rcut2) continue; if (i==j) continue; bool atchainstart=false; for (int chain=1; chain<chainstarts_c[0]; chain++) { atchainstart+=(max(i,j)==chainstarts_c[chain]); } if ( ( (abs(j-i)>1) //include all-nonneighboring beads or ((max(i,j) % Ntraj)>=Ntraj/2) // if one is sidechain, it interacts with any other side chain or backbone or atchainstart //if one bead is the start of a chain, it interacts with any other bead ) and (abs(j-i)!=Ntraj/2) //exclude bb and sc of the same residue and ((i/Ntraj)==(j/Ntraj)) //exclude beads belonging to different trajectories/replicas ) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void SoftSphereNeighborList(float4* r, InteractionList<int> list, InteractionList<bond> blist, int Ntraj) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=list.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int neighbors=0; int Nb=blist.count_d[i]; for (int j=0;j<list.N;j++) { //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; //Check that i and j are not bonded by looping over bonds of i bool nonbonded=true; for (int ib=0; ib<Nb; ib++) { bond b=blist.map_d[ib*blist.N+i]; if (b.i2==j) nonbonded=false; } if ((r2.w<ss_c.Rcut2) and (i!=j) and (nonbonded) and (i/Ntraj)==(j/Ntraj)) { list.map_d[neighbors*list.N+i]=j; neighbors++; } } list.count_d[i]=neighbors; } __global__ void SoftSphereNeighborList(float4* r, InteractionList<int> intlist, InteractionList<int> neiblist) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=intlist.N) return; //float4 ri=r[i]; float4 ri=tex1Dfetch(r_t,i); int Npartners=intlist.count_d[i]; int neighbors=0; for (int ip=0;ip<Npartners;ip++) { int j=intlist.map_d[ip*intlist.N+i]; //float4 r2=r[j]; float4 r2=tex1Dfetch(r_t,j); r2.x-=ri.x; r2.y-=ri.y; r2.z-=ri.z; r2.w=r2.x*r2.x+r2.y*r2.y+r2.z*r2.z; if (r2.w<ss_c.Rcut2) { neiblist.map_d[neighbors*neiblist.N+i]=j; neighbors++; } } neiblist.count_d[i]=neighbors; }
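A host-side driver for the kernels in this file initializes one Philox state per bead with rand_init, clears the force accumulators with force_flush, accumulates forces, and then advances positions with integrate. The sketch below shows only that launch pattern; it assumes the texture reference r_t is already bound to the coordinate array (the kernels read positions through tex1Dfetch) and it omits the bond, native-contact and neighbor-list setup that the force kernels need.

#include <curand_kernel.h>

// Launch-pattern sketch only; d_states must hold one curandStatePhilox4_32_10_t
// per bead, and the force kernels (FENEForce, SoftSphereForce, NativeForce, ...)
// are elided because their interaction lists are built elsewhere.
void run_step(float4* d_r, float4* d_f, int N, int seed, int blockSize,
              curandStatePhilox4_32_10_t* d_states, bool first_step) {
    int grid = (N + blockSize - 1) / blockSize;
    if (first_step) {
        rand_init<<<grid, blockSize>>>(seed, d_states);    // one Philox stream per bead
    }
    force_flush<<<grid, blockSize>>>(d_f, N);              // zero accumulated forces
    // ... force kernels go here, each adding into d_f ...
    integrate<<<grid, blockSize>>>(d_r, d_f, N, d_states); // Brownian position update
}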
f35ee2d81e8e388afb8cf8d54142c8be55c7b75c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @author Raffaele Solca @author Mark Gates @generated from magmablas/zlaset_band.cu, normal z -> s, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ /* GPU kernel for setting the k-1 super-diagonals to OFFDIAG and the main diagonal to DIAG. Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns, with k threads in each block. Each thread iterates across one diagonal. Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc. block 0 block 1 0 => skip above matrix 1 0 => skip above matrix 2 1 0 => skip above matrix [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 | 0 ] [ 3 2 | 1 0 ] [ 3 | 2 1 0 ] [ | 3 2 1 0 ] [ | 3 2 1 ] | 3 2 => skip below matrix 3 => skip below matrix Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel. */ __global__ void slaset_band_upper( int m, int n, float offdiag, float diag, float *A, int lda) { int k = blockDim.x; int ibx = blockIdx.x * NB; int ind = ibx + threadIdx.x - k + 1; A += ind + ibx*lda; float value = offdiag; if (threadIdx.x == k-1) value = diag; #pragma unroll for (int j=0; j < NB; j++) { if (ibx + j < n && ind + j >= 0 && ind + j < m) { A[j*(lda+1)] = value; } } } /******************************************************************************/ /* GPU kernel for setting the k-1 sub-diagonals to OFFDIAG and the main diagonal to DIAG. Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns, with k threads in each block. Each thread iterates across one diagonal. Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc. block 0 block 1 [ 0 | ] [ 1 0 | ] [ 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 | 0 ] [ 3 2 | 1 0 ] [ 3 | 2 1 0 ] [ 3 2 1 0 ] [ 3 2 1 ] 3 2 => skip below matrix 3 => skip below matrix Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel. */ __global__ void slaset_band_lower( int m, int n, float offdiag, float diag, float *A, int lda) { //int k = blockDim.x; int ibx = blockIdx.x * NB; int ind = ibx + threadIdx.x; A += ind + ibx*lda; float value = offdiag; if (threadIdx.x == 0) value = diag; #pragma unroll for (int j=0; j < NB; j++) { if (ibx + j < n && ind + j < m) { A[j*(lda+1)] = value; } } } /***************************************************************************//** Purpose ------- SLASET_BAND initializes the main diagonal of dA to DIAG, and the K-1 sub- or super-diagonals to OFFDIAG. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] k INTEGER The number of diagonals to set, including the main diagonal. K >= 0. Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block). @param[in] offdiag REAL Off-diagonal elements in the band are set to OFFDIAG. @param[in] diag REAL All the main diagonal elements are set to DIAG. @param[in] dA REAL array, dimension (LDDA,N) The M-by-N matrix dA. 
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k; and A(i,i) = BETA, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Stream to execute SLASET in. @ingroup magma_laset_band *******************************************************************************/ extern "C" void magmablas_slaset_band( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k, float offdiag, float diag, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( k < 0 || k > 1024 ) info = -4; else if ( ldda < max(1,m) ) info = -6; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if (uplo == MagmaUpper) { dim3 threads( min(k,n) ); dim3 grid( magma_ceildiv( min(m+k-1,n), NB ) ); hipLaunchKernelGGL(( slaset_band_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dA, ldda); } else if (uplo == MagmaLower) { dim3 threads( min(k,m) ); dim3 grid( magma_ceildiv( min(m,n), NB ) ); hipLaunchKernelGGL(( slaset_band_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dA, ldda); } }
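The documented behaviour is easiest to read off a plain CPU reference: in the column-major matrix, the main diagonal is set to diag and the k-1 adjacent sub- or super-diagonals to offdiag, with all other entries untouched. The function below is such a reference sketch, not MAGMA code.

// CPU reference for the band-set semantics of magmablas_slaset_band
// (column-major A with leading dimension lda).
void slaset_band_cpu(bool upper, int m, int n, int k,
                     float offdiag, float diag, float* A, int lda) {
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < m; ++i) {
            int d = upper ? (j - i) : (i - j);   // distance from the main diagonal
            if (d == 0)              A[i + j * lda] = diag;
            else if (d > 0 && d < k) A[i + j * lda] = offdiag;
        }
    }
}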
f35ee2d81e8e388afb8cf8d54142c8be55c7b75c.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @author Raffaele Solca @author Mark Gates @generated from magmablas/zlaset_band.cu, normal z -> s, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ /* GPU kernel for setting the k-1 super-diagonals to OFFDIAG and the main diagonal to DIAG. Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns, with k threads in each block. Each thread iterates across one diagonal. Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc. block 0 block 1 0 => skip above matrix 1 0 => skip above matrix 2 1 0 => skip above matrix [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 | 0 ] [ 3 2 | 1 0 ] [ 3 | 2 1 0 ] [ | 3 2 1 0 ] [ | 3 2 1 ] | 3 2 => skip below matrix 3 => skip below matrix Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel. */ __global__ void slaset_band_upper( int m, int n, float offdiag, float diag, float *A, int lda) { int k = blockDim.x; int ibx = blockIdx.x * NB; int ind = ibx + threadIdx.x - k + 1; A += ind + ibx*lda; float value = offdiag; if (threadIdx.x == k-1) value = diag; #pragma unroll for (int j=0; j < NB; j++) { if (ibx + j < n && ind + j >= 0 && ind + j < m) { A[j*(lda+1)] = value; } } } /******************************************************************************/ /* GPU kernel for setting the k-1 sub-diagonals to OFFDIAG and the main diagonal to DIAG. Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns, with k threads in each block. Each thread iterates across one diagonal. Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc. block 0 block 1 [ 0 | ] [ 1 0 | ] [ 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 0 | ] [ 3 2 1 | 0 ] [ 3 2 | 1 0 ] [ 3 | 2 1 0 ] [ 3 2 1 0 ] [ 3 2 1 ] 3 2 => skip below matrix 3 => skip below matrix Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel. */ __global__ void slaset_band_lower( int m, int n, float offdiag, float diag, float *A, int lda) { //int k = blockDim.x; int ibx = blockIdx.x * NB; int ind = ibx + threadIdx.x; A += ind + ibx*lda; float value = offdiag; if (threadIdx.x == 0) value = diag; #pragma unroll for (int j=0; j < NB; j++) { if (ibx + j < n && ind + j < m) { A[j*(lda+1)] = value; } } } /***************************************************************************//** Purpose ------- SLASET_BAND initializes the main diagonal of dA to DIAG, and the K-1 sub- or super-diagonals to OFFDIAG. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] k INTEGER The number of diagonals to set, including the main diagonal. K >= 0. Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block). @param[in] offdiag REAL Off-diagonal elements in the band are set to OFFDIAG. @param[in] diag REAL All the main diagonal elements are set to DIAG. @param[in] dA REAL array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. 
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k; and A(i,i) = BETA, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Stream to execute SLASET in. @ingroup magma_laset_band *******************************************************************************/ extern "C" void magmablas_slaset_band( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k, float offdiag, float diag, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( k < 0 || k > 1024 ) info = -4; else if ( ldda < max(1,m) ) info = -6; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if (uplo == MagmaUpper) { dim3 threads( min(k,n) ); dim3 grid( magma_ceildiv( min(m+k-1,n), NB ) ); slaset_band_upper<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dA, ldda); } else if (uplo == MagmaLower) { dim3 threads( min(k,m) ); dim3 grid( magma_ceildiv( min(m,n), NB ) ); slaset_band_lower<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dA, ldda); } }
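For the launch shape, the wrapper uses min(k, n) threads per block in the upper case (min(k, m) in the lower case) and magma_ceildiv over the number of block columns. With the m=10, n=12, k=4, nb=8 example drawn in the kernel comments, that is 4 threads and ceil(12/8) = 2 blocks, matching the two block columns in the diagram. A small helper reproducing that arithmetic (illustrative only; the block width is a parameter here instead of the fixed NB=64):

struct BandLaunch { int threads; int blocks; };

BandLaunch slaset_band_shape(bool upper, int m, int n, int k, int nb) {
    BandLaunch s;
    if (upper) {
        s.threads = (k < n ? k : n);
        int cols  = ((m + k - 1) < n ? (m + k - 1) : n);
        s.blocks  = (cols + nb - 1) / nb;   // magma_ceildiv(cols, nb)
    } else {
        s.threads = (k < m ? k : m);
        int cols  = (m < n ? m : n);
        s.blocks  = (cols + nb - 1) / nb;   // magma_ceildiv(cols, nb)
    }
    return s;
}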
ffd65ffac04f4324b9025c3c611f344857d6d474.hip
// !!! This is a file automatically generated by hipify!!! /***************************************************************************** * * MULTIANS - Massively parallel ANS decoding on GPUs * * released under LGPL-3.0 * * 2017-2019 Andr Weienberger * *****************************************************************************/ #include <cassert> #include <chrono> #include <random> #include <algorithm> #include <iostream> #include <iomanip> #include "multians.h" // encoder configuration // #define NUM_SYMBOLS 256 #define NUM_STATES 1024 // seed for PRNG to generate random test data #define SEED 5 // decoder configuration // // SUBSEQUENCE_SIZE must be a multiple of 4 #define SUBSEQUENCE_SIZE 4 // number of GPU threads per thread block // #define THREADS_PER_BLOCK 128 void run(long int input_size) { // print column headers std::cout << "\u03BB | compressed size (bytes) | "; std::cout << std::endl << std::endl; auto start = std::chrono::steady_clock::now(); for(float lambda = 0.1f; lambda < 2.5f; lambda += 0.16) { // vectors to record timings std::cout << std::left << std::setw(5) << lambda << std::setfill(' '); // generate random, exponentially distributed data auto dist = ANSTableGenerator::generate_distribution( SEED, NUM_SYMBOLS, NUM_STATES, [&](double x) {return lambda * exp(-lambda * x);}); auto random_data = ANSTableGenerator::generate_test_data( dist.dist, input_size, NUM_STATES, SEED); // create an ANS table, based on the distribution auto table = ANSTableGenerator::generate_table( dist.prob, dist.dist, nullptr, NUM_SYMBOLS, NUM_STATES); // derive an encoder table from the ANS table auto encoder_table = ANSTableGenerator::generate_encoder_table(table); // derive a decoder table from the ANS table auto decoder_table = ANSTableGenerator::get_decoder_table(encoder_table); // tANS-encode the generated data using the encoder table auto input_buffer = ANSEncoder::encode( random_data->data(), input_size, encoder_table); // allocate buffer for the decoded output auto output_buffer = std::make_shared<CUHDOutputBuffer>(input_size); // allocate device buffer for compressed input size_t compressed_size = input_buffer->get_compressed_size(); size_t input_buffer_bytes = sizeof(UNIT_TYPE) * (compressed_size + 4); UNIT_TYPE* d_input_buffer; hipMalloc((void**)&d_input_buffer, input_buffer_bytes); hipMemcpy(d_input_buffer, input_buffer->get_compressed_data(), input_buffer_bytes, hipMemcpyHostToDevice); // allocate device buffer for coding table size_t decoder_table_size = decoder_table->get_size() * sizeof(CUHDCodetableItem); std::uint32_t *d_decoder_table; hipMalloc((void**)&d_decoder_table, decoder_table_size); hipMemcpy(d_decoder_table, reinterpret_cast<std::uint32_t*>(decoder_table->get()), decoder_table_size, hipMemcpyHostToDevice); //cast the type CUHDCodetableItem // allocate device buffer for decompressed output size_t output_buffer_bytes = sizeof(SYMBOL_TYPE) * output_buffer->get_uncompressed_size(); SYMBOL_TYPE* d_output_buffer; hipMalloc((void**)&d_output_buffer, output_buffer_bytes); size_t num_subseq = SDIV(compressed_size, SUBSEQUENCE_SIZE); size_t num_blocks = SDIV(num_subseq, THREADS_PER_BLOCK); // allocate device buffer for subsequence synchronization // Note the original type is cuhd::CUHDSubsequenceSyncPoint (uint4 is equivalent) uint4 *d_sync_info; hipMalloc((void**) &d_sync_info, num_subseq * sizeof(uint4)); hipMemset(d_sync_info, 0, num_subseq * sizeof(uint4)); // allocate device buffer for size of output for each subsequence std::uint32_t *d_output_sizes; hipMalloc((void**) 
&d_output_sizes, num_subseq * sizeof(std::uint32_t)); // allocate device buffer for indicating inter-sequence synchronisation std::uint8_t *d_sequence_synced; hipMalloc((void**)&d_sequence_synced, num_blocks * sizeof(std::uint8_t)); hipMemset(d_sequence_synced, 0, num_blocks * sizeof(std::uint8_t)); std::uint8_t* h_sequence_synced = (std::uint8_t*) malloc(num_blocks * sizeof(std::uint8_t)); // decode the compressed data on a GPU cuhd::CUHDGPUDecoder::decode( d_input_buffer, input_buffer->get_compressed_size(), d_output_buffer, output_buffer->get_uncompressed_size(), d_decoder_table, d_sync_info, d_output_sizes, d_sequence_synced, h_sequence_synced, input_buffer->get_first_state(), input_buffer->get_first_bit(), decoder_table->get_num_entries(), 11, SUBSEQUENCE_SIZE, THREADS_PER_BLOCK); // copy decompressed output from the GPU to the host system hipMemcpy(output_buffer->get_decompressed_data().get(), d_output_buffer, output_buffer_bytes, hipMemcpyDeviceToHost); // reverse all bytes output_buffer->reverse(); // check for errors in decompressed data if(cuhd::CUHDUtil::equals(random_data->data(), output_buffer->get_decompressed_data().get(), input_size)); else std::cout << "********* MISMATCH ************" << std::endl; // print compressed size (bytes) std::cout << std::left << std::setw(10) << input_buffer->get_compressed_size() * sizeof(UNIT_TYPE) << std::setfill(' ') << std::endl; hipFree(d_input_buffer); hipFree(d_output_buffer); hipFree(d_decoder_table); hipFree(d_sync_info); hipFree(d_output_sizes); hipFree(d_sequence_synced); free(h_sequence_synced); } auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); std::cout << "Total elapsed time " << time * 1e-9f << " (s)\n"; } int main(int argc, char **argv) { // name of the binary file const char* bin = argv[0]; auto print_help = [&]() { std::cout << "USAGE: " << bin << "<size of input in megabytes> " << std::endl; }; if(argc < 2) {print_help(); return 1;} // input size in MB const long int size = atoi(argv[1]) * 1024 * 1024; if(size < 1) { print_help(); return 1; } // SUBSEQUENCE_SIZE must be a multiple of 4 assert(SUBSEQUENCE_SIZE % 4 == 0); // run the test run(size); return 0; }
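The decoder bookkeeping above is driven by two counts: the compressed stream is split into subsequences of SUBSEQUENCE_SIZE units, and the subsequences are grouped into thread blocks of THREADS_PER_BLOCK. SDIV comes from multians.h (not shown) and is used here as a ceiling division; the snippet below restates that arithmetic under that assumption.

#include <cstddef>

// Assumed equivalent of the SDIV macro from multians.h: ceiling division.
#define SDIV_ASSUMED(x, y) (((x) + (y) - 1) / (y))

// Subsequences per compressed stream and thread blocks per kernel launch,
// mirroring num_subseq and num_blocks in run() above.
size_t subseq_count(size_t compressed_size, size_t subseq_size) {
    return SDIV_ASSUMED(compressed_size, subseq_size);
}
size_t block_count(size_t num_subseq, size_t threads_per_block) {
    return SDIV_ASSUMED(num_subseq, threads_per_block);
}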
ffd65ffac04f4324b9025c3c611f344857d6d474.cu
/***************************************************************************** * * MULTIANS - Massively parallel ANS decoding on GPUs * * released under LGPL-3.0 * * 2017-2019 André Weißenberger * *****************************************************************************/ #include <cassert> #include <chrono> #include <random> #include <algorithm> #include <iostream> #include <iomanip> #include "multians.h" // encoder configuration // #define NUM_SYMBOLS 256 #define NUM_STATES 1024 // seed for PRNG to generate random test data #define SEED 5 // decoder configuration // // SUBSEQUENCE_SIZE must be a multiple of 4 #define SUBSEQUENCE_SIZE 4 // number of GPU threads per thread block // #define THREADS_PER_BLOCK 128 void run(long int input_size) { // print column headers std::cout << "\u03BB | compressed size (bytes) | "; std::cout << std::endl << std::endl; auto start = std::chrono::steady_clock::now(); for(float lambda = 0.1f; lambda < 2.5f; lambda += 0.16) { // vectors to record timings std::cout << std::left << std::setw(5) << lambda << std::setfill(' '); // generate random, exponentially distributed data auto dist = ANSTableGenerator::generate_distribution( SEED, NUM_SYMBOLS, NUM_STATES, [&](double x) {return lambda * exp(-lambda * x);}); auto random_data = ANSTableGenerator::generate_test_data( dist.dist, input_size, NUM_STATES, SEED); // create an ANS table, based on the distribution auto table = ANSTableGenerator::generate_table( dist.prob, dist.dist, nullptr, NUM_SYMBOLS, NUM_STATES); // derive an encoder table from the ANS table auto encoder_table = ANSTableGenerator::generate_encoder_table(table); // derive a decoder table from the ANS table auto decoder_table = ANSTableGenerator::get_decoder_table(encoder_table); // tANS-encode the generated data using the encoder table auto input_buffer = ANSEncoder::encode( random_data->data(), input_size, encoder_table); // allocate buffer for the decoded output auto output_buffer = std::make_shared<CUHDOutputBuffer>(input_size); // allocate device buffer for compressed input size_t compressed_size = input_buffer->get_compressed_size(); size_t input_buffer_bytes = sizeof(UNIT_TYPE) * (compressed_size + 4); UNIT_TYPE* d_input_buffer; cudaMalloc((void**)&d_input_buffer, input_buffer_bytes); cudaMemcpy(d_input_buffer, input_buffer->get_compressed_data(), input_buffer_bytes, cudaMemcpyHostToDevice); // allocate device buffer for coding table size_t decoder_table_size = decoder_table->get_size() * sizeof(CUHDCodetableItem); std::uint32_t *d_decoder_table; cudaMalloc((void**)&d_decoder_table, decoder_table_size); cudaMemcpy(d_decoder_table, reinterpret_cast<std::uint32_t*>(decoder_table->get()), decoder_table_size, cudaMemcpyHostToDevice); //cast the type CUHDCodetableItem // allocate device buffer for decompressed output size_t output_buffer_bytes = sizeof(SYMBOL_TYPE) * output_buffer->get_uncompressed_size(); SYMBOL_TYPE* d_output_buffer; cudaMalloc((void**)&d_output_buffer, output_buffer_bytes); size_t num_subseq = SDIV(compressed_size, SUBSEQUENCE_SIZE); size_t num_blocks = SDIV(num_subseq, THREADS_PER_BLOCK); // allocate device buffer for subsequence synchronization // Note the original type is cuhd::CUHDSubsequenceSyncPoint (uint4 is equivalent) uint4 *d_sync_info; cudaMalloc((void**) &d_sync_info, num_subseq * sizeof(uint4)); cudaMemset(d_sync_info, 0, num_subseq * sizeof(uint4)); // allocate device buffer for size of output for each subsequence std::uint32_t *d_output_sizes; cudaMalloc((void**) &d_output_sizes, num_subseq * 
sizeof(std::uint32_t)); // allocate device buffer for indicating inter-sequence synchronisation std::uint8_t *d_sequence_synced; cudaMalloc((void**)&d_sequence_synced, num_blocks * sizeof(std::uint8_t)); cudaMemset(d_sequence_synced, 0, num_blocks * sizeof(std::uint8_t)); std::uint8_t* h_sequence_synced = (std::uint8_t*) malloc(num_blocks * sizeof(std::uint8_t)); // decode the compressed data on a GPU cuhd::CUHDGPUDecoder::decode( d_input_buffer, input_buffer->get_compressed_size(), d_output_buffer, output_buffer->get_uncompressed_size(), d_decoder_table, d_sync_info, d_output_sizes, d_sequence_synced, h_sequence_synced, input_buffer->get_first_state(), input_buffer->get_first_bit(), decoder_table->get_num_entries(), 11, SUBSEQUENCE_SIZE, THREADS_PER_BLOCK); // copy decompressed output from the GPU to the host system cudaMemcpy(output_buffer->get_decompressed_data().get(), d_output_buffer, output_buffer_bytes, cudaMemcpyDeviceToHost); // reverse all bytes output_buffer->reverse(); // check for errors in decompressed data if(cuhd::CUHDUtil::equals(random_data->data(), output_buffer->get_decompressed_data().get(), input_size)); else std::cout << "********* MISMATCH ************" << std::endl; // print compressed size (bytes) std::cout << std::left << std::setw(10) << input_buffer->get_compressed_size() * sizeof(UNIT_TYPE) << std::setfill(' ') << std::endl; cudaFree(d_input_buffer); cudaFree(d_output_buffer); cudaFree(d_decoder_table); cudaFree(d_sync_info); cudaFree(d_output_sizes); cudaFree(d_sequence_synced); free(h_sequence_synced); } auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); std::cout << "Total elapsed time " << time * 1e-9f << " (s)\n"; } int main(int argc, char **argv) { // name of the binary file const char* bin = argv[0]; auto print_help = [&]() { std::cout << "USAGE: " << bin << "<size of input in megabytes> " << std::endl; }; if(argc < 2) {print_help(); return 1;} // input size in MB const long int size = atoi(argv[1]) * 1024 * 1024; if(size < 1) { print_help(); return 1; } // SUBSEQUENCE_SIZE must be a multiple of 4 assert(SUBSEQUENCE_SIZE % 4 == 0); // run the test run(size); return 0; }
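The cudaMalloc/cudaMemcpy/cudaMemset calls in run() are unchecked. A conventional checking macro such as the one below — a generic addition for illustration, not part of MULTIANS — makes an allocation or copy failure visible immediately instead of surfacing later as a decode mismatch.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err__ = (call);                                       \
        if (err__ != cudaSuccess) {                                       \
            std::fprintf(stderr, "CUDA error %s at %s:%d\n",              \
                         cudaGetErrorString(err__), __FILE__, __LINE__);  \
            std::exit(EXIT_FAILURE);                                      \
        }                                                                 \
    } while (0)

// Example use: CUDA_CHECK(cudaMalloc((void**)&d_input_buffer, input_buffer_bytes));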
c7742f8f4f41bba78b2856f0b48535dde8c8bc6a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> __global__ void cube(float * d_out, float * d_in){ // Todo: Fill in this function int id = threadIdx.x; float num = d_in[id]; d_out[id] = num*num*num; } int main(int argc, char ** argv) { // NOTE: h is for host and d is for device // This is the general template of cuda code // 1. CPU allocate memory in the device. // 2. CPU copy data from the host structs to the device structs // 3. CPU runs the kernel on the GPU // 4. CPU copies the data from the device struct to the host struct const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory hipMalloc((void**) &d_in, ARRAY_BYTES); hipMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); // launch the kernel // The first is the number of blocks and the second is the number of threads per block // Can run many blocks at once. // Max number of threads per block is 512 for old and 1024 for new hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in); // copy back the result array to the CPU hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } hipFree(d_in); hipFree(d_out); return 0; }
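The only substantive change hipify makes to this program is the kernel-launch syntax: the CUDA original that follows uses the triple-chevron form, while the hipified version above spells the same launch through the hipLaunchKernelGGL macro, whose two extra arguments are the dynamic shared-memory size in bytes and the stream (both defaulted in the CUDA form).

CUDA:  cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
HIP :  hipLaunchKernelGGL(cube, dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);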
c7742f8f4f41bba78b2856f0b48535dde8c8bc6a.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> __global__ void cube(float * d_out, float * d_in){ // Todo: Fill in this function int id = threadIdx.x; float num = d_in[id]; d_out[id] = num*num*num; } int main(int argc, char ** argv) { // NOTE: h is for host and d is for device // This is the general template of cuda code // 1. CPU allocate memory in the device. // 2. CPU copy data from the host structs to the device structs // 3. CPU runs the kernel on the GPU // 4. CPU copies the data from the device struct to the host struct const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory cudaMalloc((void**) &d_in, ARRAY_BYTES); cudaMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // launch the kernel // The first is the number of blocks and the second is the number of threads per block // Can run many blocks at once. // Max number of threads per block is 512 for old and 1024 for new cube<<<1, ARRAY_SIZE>>>(d_out, d_in); // copy back the result array to the CPU cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } cudaFree(d_in); cudaFree(d_out); return 0; }
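As the comments in main() note, a single block is capped at 512 or 1024 threads depending on the GPU generation, so the one-block launch above only works because ARRAY_SIZE is 64. A grid-stride variant of the kernel removes that limit; this is an illustrative alternative, not the original exercise code.

__global__ void cube_grid_stride(float* d_out, const float* d_in, int n) {
    // Each thread strides over the array, so any n is covered by any launch shape.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        float v = d_in[i];
        d_out[i] = v * v * v;
    }
}

// Launch with enough blocks to cover the array, e.g.:
//   int threads = 256;
//   int blocks  = (ARRAY_SIZE + threads - 1) / threads;
//   cube_grid_stride<<<blocks, threads>>>(d_out, d_in, ARRAY_SIZE);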
278e5588260d8811b8580f4a45fc7f6a9ad80cf0.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2017-2018 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <hip/hip_runtime.h> #include "NvCodecUtils.h" template<typename YuvUnitx2> static __global__ void Resize(hipTextureObject_t texY, hipTextureObject_t texUv, uint8_t *pDst, uint8_t *pDstUV, int nPitch, int nWidth, int nHeight, float fxScale, float fyScale) { int ix = blockIdx.x * blockDim.x + threadIdx.x, iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= nWidth / 2 || iy >= nHeight / 2) { return; } int x = ix * 2, y = iy * 2; typedef decltype(YuvUnitx2::x) YuvUnit; const int MAX = 1 << (sizeof(YuvUnit) * 8); *(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 { (YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), (YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX) }; y++; *(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 { (YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), (YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX) }; float2 uv = tex2D<float2>(texUv, ix / fxScale, (nHeight + iy) / fyScale + 0.5f); *(YuvUnitx2 *)(pDstUV + iy * nPitch + ix * 2 * sizeof(YuvUnit)) = YuvUnitx2{ (YuvUnit)(uv.x * MAX), (YuvUnit)(uv.y * MAX) }; } template <typename YuvUnitx2> static void Resize(unsigned char *dpDst, unsigned char* dpDstUV, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight) { hipResourceDesc resDesc = {}; resDesc.resType = hipResourceTypePitch2D; resDesc.res.pitch2D.devPtr = dpSrc; resDesc.res.pitch2D.desc = hipCreateChannelDesc<decltype(YuvUnitx2::x)>(); resDesc.res.pitch2D.width = nSrcWidth; resDesc.res.pitch2D.height = nSrcHeight; resDesc.res.pitch2D.pitchInBytes = nSrcPitch; hipTextureDesc texDesc = {}; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeNormalizedFloat; hipTextureObject_t texY=0; ck(hipCreateTextureObject(&texY, &resDesc, &texDesc, NULL)); resDesc.res.pitch2D.desc = hipCreateChannelDesc<YuvUnitx2>(); resDesc.res.pitch2D.width = nSrcWidth / 2; resDesc.res.pitch2D.height = nSrcHeight * 3 / 2; hipTextureObject_t texUv=0; ck(hipCreateTextureObject(&texUv, &resDesc, &texDesc, NULL)); Resize<YuvUnitx2> << <dim3((nDstWidth + 31) / 32, (nDstHeight + 31) / 32), dim3(16, 16) >> >(texY, texUv, dpDst, dpDstUV, nDstPitch, nDstWidth, nDstHeight, 1.0f * nDstWidth / nSrcWidth, 1.0f * nDstHeight / nSrcHeight); ck(hipDestroyTextureObject(texY)); ck(hipDestroyTextureObject(texUv)); } void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstNv12UV) { unsigned char* dpDstUV = dpDstNv12UV ? dpDstNv12UV : dpDstNv12 + (nDstPitch*nDstHeight); return Resize<uchar2>(dpDstNv12, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcNv12, nSrcPitch, nSrcWidth, nSrcHeight); } void ResizeP016(unsigned char *dpDstP016, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcP016, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstP016UV) { unsigned char* dpDstUV = dpDstP016UV ? 
dpDstP016UV : dpDstP016 + (nDstPitch*nDstHeight); return Resize<ushort2>(dpDstP016, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcP016, nSrcPitch, nSrcWidth, nSrcHeight); } static __global__ void Scale(hipTextureObject_t texSrc, uint8_t *pDst, int nPitch, int nWidth, int nHeight, float fxScale, float fyScale) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } *(unsigned char*)(pDst + (y * nPitch) + x) = (unsigned char)(fminf((tex2D<float>(texSrc, x * fxScale, y * fyScale)) * 255.0f, 255.0f)); } static __global__ void Scale_uv(hipTextureObject_t texSrc, uint8_t *pDst, int nPitch, int nWidth, int nHeight, float fxScale, float fyScale) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } float2 uv = tex2D<float2>(texSrc, x * fxScale, y * fyScale); uchar2 uvOut = uchar2{ (unsigned char)(fminf(uv.x * 255.0f, 255.0f)), (unsigned char)(fminf(uv.y * 255.0f, 255.0f)) }; *(uchar2*)(pDst + (y * nPitch) + 2 * x) = uvOut; } void ScaleKernelLaunch(unsigned char *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, bool bUVPlane = false) { hipResourceDesc resDesc = {}; resDesc.resType = hipResourceTypePitch2D; resDesc.res.pitch2D.devPtr = dpSrc; resDesc.res.pitch2D.desc = bUVPlane ? hipCreateChannelDesc<uchar2>() : hipCreateChannelDesc<unsigned char>(); resDesc.res.pitch2D.width = nSrcWidth; resDesc.res.pitch2D.height = nSrcHeight; resDesc.res.pitch2D.pitchInBytes = nSrcPitch; hipTextureDesc texDesc = {}; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeNormalizedFloat; texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.addressMode[2] = hipAddressModeClamp; hipTextureObject_t texSrc = 0; ck(hipCreateTextureObject(&texSrc, &resDesc, &texDesc, NULL)); dim3 blockSize(16, 16, 1); dim3 gridSize(((uint32_t)nDstWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nDstHeight + blockSize.y - 1) / blockSize.y, 1); if (bUVPlane) { Scale_uv << <gridSize, blockSize >> >(texSrc, dpDst, nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight); } else { Scale << <gridSize, blockSize >> >(texSrc, dpDst, nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight); } ck(hipGetLastError()); ck(hipDestroyTextureObject(texSrc)); } void ScaleYUV420(unsigned char *dpDstY, unsigned char* dpDstU, unsigned char* dpDstV, int nDstPitch, int nDstChromaPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcY, unsigned char* dpSrcU, unsigned char* dpSrcV, int nSrcPitch, int nSrcChromaPitch, int nSrcWidth, int nSrcHeight, bool bSemiplanar) { int chromaWidthDst = (nDstWidth + 1) / 2; int chromaHeightDst = (nDstHeight + 1) / 2; int chromaWidthSrc = (nSrcWidth + 1) / 2; int chromaHeightSrc = (nSrcHeight + 1) / 2; ScaleKernelLaunch(dpDstY, nDstPitch, nDstWidth, nDstHeight, dpSrcY, nSrcPitch, nSrcWidth, nSrcHeight); if (bSemiplanar) { ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc, true); } else { ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc); ScaleKernelLaunch(dpDstV, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcV, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc); } }
278e5588260d8811b8580f4a45fc7f6a9ad80cf0.cu
/* * Copyright 2017-2018 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cuda_runtime.h> #include "NvCodecUtils.h" template<typename YuvUnitx2> static __global__ void Resize(cudaTextureObject_t texY, cudaTextureObject_t texUv, uint8_t *pDst, uint8_t *pDstUV, int nPitch, int nWidth, int nHeight, float fxScale, float fyScale) { int ix = blockIdx.x * blockDim.x + threadIdx.x, iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= nWidth / 2 || iy >= nHeight / 2) { return; } int x = ix * 2, y = iy * 2; typedef decltype(YuvUnitx2::x) YuvUnit; const int MAX = 1 << (sizeof(YuvUnit) * 8); *(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 { (YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), (YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX) }; y++; *(YuvUnitx2 *)(pDst + y * nPitch + x * sizeof(YuvUnit)) = YuvUnitx2 { (YuvUnit)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), (YuvUnit)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX) }; float2 uv = tex2D<float2>(texUv, ix / fxScale, (nHeight + iy) / fyScale + 0.5f); *(YuvUnitx2 *)(pDstUV + iy * nPitch + ix * 2 * sizeof(YuvUnit)) = YuvUnitx2{ (YuvUnit)(uv.x * MAX), (YuvUnit)(uv.y * MAX) }; } template <typename YuvUnitx2> static void Resize(unsigned char *dpDst, unsigned char* dpDstUV, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight) { cudaResourceDesc resDesc = {}; resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = dpSrc; resDesc.res.pitch2D.desc = cudaCreateChannelDesc<decltype(YuvUnitx2::x)>(); resDesc.res.pitch2D.width = nSrcWidth; resDesc.res.pitch2D.height = nSrcHeight; resDesc.res.pitch2D.pitchInBytes = nSrcPitch; cudaTextureDesc texDesc = {}; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeNormalizedFloat; cudaTextureObject_t texY=0; ck(cudaCreateTextureObject(&texY, &resDesc, &texDesc, NULL)); resDesc.res.pitch2D.desc = cudaCreateChannelDesc<YuvUnitx2>(); resDesc.res.pitch2D.width = nSrcWidth / 2; resDesc.res.pitch2D.height = nSrcHeight * 3 / 2; cudaTextureObject_t texUv=0; ck(cudaCreateTextureObject(&texUv, &resDesc, &texDesc, NULL)); Resize<YuvUnitx2> << <dim3((nDstWidth + 31) / 32, (nDstHeight + 31) / 32), dim3(16, 16) >> >(texY, texUv, dpDst, dpDstUV, nDstPitch, nDstWidth, nDstHeight, 1.0f * nDstWidth / nSrcWidth, 1.0f * nDstHeight / nSrcHeight); ck(cudaDestroyTextureObject(texY)); ck(cudaDestroyTextureObject(texUv)); } void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstNv12UV) { unsigned char* dpDstUV = dpDstNv12UV ? dpDstNv12UV : dpDstNv12 + (nDstPitch*nDstHeight); return Resize<uchar2>(dpDstNv12, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcNv12, nSrcPitch, nSrcWidth, nSrcHeight); } void ResizeP016(unsigned char *dpDstP016, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcP016, int nSrcPitch, int nSrcWidth, int nSrcHeight, unsigned char* dpDstP016UV) { unsigned char* dpDstUV = dpDstP016UV ? 
dpDstP016UV : dpDstP016 + (nDstPitch*nDstHeight); return Resize<ushort2>(dpDstP016, dpDstUV, nDstPitch, nDstWidth, nDstHeight, dpSrcP016, nSrcPitch, nSrcWidth, nSrcHeight); } static __global__ void Scale(cudaTextureObject_t texSrc, uint8_t *pDst, int nPitch, int nWidth, int nHeight, float fxScale, float fyScale) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } *(unsigned char*)(pDst + (y * nPitch) + x) = (unsigned char)(fminf((tex2D<float>(texSrc, x * fxScale, y * fyScale)) * 255.0f, 255.0f)); } static __global__ void Scale_uv(cudaTextureObject_t texSrc, uint8_t *pDst, int nPitch, int nWidth, int nHeight, float fxScale, float fyScale) { int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= nWidth || y >= nHeight) { return; } float2 uv = tex2D<float2>(texSrc, x * fxScale, y * fyScale); uchar2 uvOut = uchar2{ (unsigned char)(fminf(uv.x * 255.0f, 255.0f)), (unsigned char)(fminf(uv.y * 255.0f, 255.0f)) }; *(uchar2*)(pDst + (y * nPitch) + 2 * x) = uvOut; } void ScaleKernelLaunch(unsigned char *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, bool bUVPlane = false) { cudaResourceDesc resDesc = {}; resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = dpSrc; resDesc.res.pitch2D.desc = bUVPlane ? cudaCreateChannelDesc<uchar2>() : cudaCreateChannelDesc<unsigned char>(); resDesc.res.pitch2D.width = nSrcWidth; resDesc.res.pitch2D.height = nSrcHeight; resDesc.res.pitch2D.pitchInBytes = nSrcPitch; cudaTextureDesc texDesc = {}; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeNormalizedFloat; texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.addressMode[2] = cudaAddressModeClamp; cudaTextureObject_t texSrc = 0; ck(cudaCreateTextureObject(&texSrc, &resDesc, &texDesc, NULL)); dim3 blockSize(16, 16, 1); dim3 gridSize(((uint32_t)nDstWidth + blockSize.x - 1) / blockSize.x, ((uint32_t)nDstHeight + blockSize.y - 1) / blockSize.y, 1); if (bUVPlane) { Scale_uv << <gridSize, blockSize >> >(texSrc, dpDst, nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight); } else { Scale << <gridSize, blockSize >> >(texSrc, dpDst, nDstPitch, nDstWidth, nDstHeight, 1.0f * nSrcWidth / nDstWidth, 1.0f * nSrcHeight / nDstHeight); } ck(cudaGetLastError()); ck(cudaDestroyTextureObject(texSrc)); } void ScaleYUV420(unsigned char *dpDstY, unsigned char* dpDstU, unsigned char* dpDstV, int nDstPitch, int nDstChromaPitch, int nDstWidth, int nDstHeight, unsigned char *dpSrcY, unsigned char* dpSrcU, unsigned char* dpSrcV, int nSrcPitch, int nSrcChromaPitch, int nSrcWidth, int nSrcHeight, bool bSemiplanar) { int chromaWidthDst = (nDstWidth + 1) / 2; int chromaHeightDst = (nDstHeight + 1) / 2; int chromaWidthSrc = (nSrcWidth + 1) / 2; int chromaHeightSrc = (nSrcHeight + 1) / 2; ScaleKernelLaunch(dpDstY, nDstPitch, nDstWidth, nDstHeight, dpSrcY, nSrcPitch, nSrcWidth, nSrcHeight); if (bSemiplanar) { ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc, true); } else { ScaleKernelLaunch(dpDstU, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcU, nSrcChromaPitch, chromaWidthSrc, chromaHeightSrc); ScaleKernelLaunch(dpDstV, nDstChromaPitch, chromaWidthDst, chromaHeightDst, dpSrcV, nSrcChromaPitch, chromaWidthSrc, 
chromaHeightSrc); } }
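// A usage sketch for the ResizeNv12 entry point defined above, assuming NV12
// frames laid out as a pitched luma plane followed by a half-height interleaved
// UV plane in the same allocation (which is what the Resize kernel samples).
// The resolutions and variable names are illustrative only, and error checking
// is omitted.
#include <cuda_runtime.h>

void ResizeNv12(unsigned char *dpDstNv12, int nDstPitch, int nDstWidth, int nDstHeight,
    unsigned char *dpSrcNv12, int nSrcPitch, int nSrcWidth, int nSrcHeight,
    unsigned char *dpDstNv12UV); // defined in this file

void ResizeNv12Example() {
    const int srcW = 1920, srcH = 1080;
    const int dstW = 1280, dstH = 720;
    unsigned char *dSrc = NULL, *dDst = NULL;
    size_t srcPitch = 0, dstPitch = 0;

    // 3/2 * height rows: luma plane plus the interleaved UV plane.
    cudaMallocPitch((void**)&dSrc, &srcPitch, srcW, srcH * 3 / 2);
    cudaMallocPitch((void**)&dDst, &dstPitch, dstW, dstH * 3 / 2);

    // ... upload or decode an NV12 frame into dSrc here ...

    // Passing NULL for the destination UV pointer makes ResizeNv12 place the
    // UV plane at dDst + dstPitch * dstH, matching the packed layout above.
    ResizeNv12(dDst, (int)dstPitch, dstW, dstH,
        dSrc, (int)srcPitch, srcW, srcH, NULL);

    cudaFree(dSrc);
    cudaFree(dDst);
}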
1941937b61b3611287a7559f8c934aea7ebcdf77.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <vector> #include <boost/tokenizer.hpp> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <random> #include "timer.h" using namespace std; void transfer(unsigned long long *key, long *value, unsigned long long *key_out, long *value_out, int kBytes, int vBytes, size_t data_size, int *new_size, int thread_id) { // unsigned long long *d_A; // long *d_B; unsigned int t, travdirtime; int GPU_number = 0; thrust::host_vector<unsigned long long> h_vec_key(data_size); thrust::host_vector<unsigned long long> h_vec_value(data_size); for(int i=0; i < data_size; i++) { h_vec_key[i] = key[i]; h_vec_value[i] = value[i]; } start_timer(&t); GPU_number = thread_id - 1; hipSetDevice(GPU_number); thrust::device_vector<unsigned long long> d_vec_key(data_size); thrust::device_vector<long> d_vec_value(data_size); thrust::copy(h_vec_key.begin(), h_vec_key.end(), d_vec_key.begin()); thrust::copy(h_vec_value.begin(), h_vec_value.end(), d_vec_value.begin()); cout << "thread:" << thread_id << " - transfer done." << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); /* reduction */ start_timer(&t); thrust::sort_by_key(d_vec_key.begin(), d_vec_key.end(), d_vec_value.begin()); thrust::device_vector<unsigned long long> d_vec_key_out(data_size); thrust::device_vector<long> d_vec_value_out(data_size); auto new_end = thrust::reduce_by_key(d_vec_key.begin(), d_vec_key.end(), d_vec_value.begin(), d_vec_key_out.begin(), d_vec_value_out.begin()); int new_size_r = new_end.first - d_vec_key_out.begin(); cout << "thread:" << thread_id << " - reduction done." << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); for(int i = 0; i < 5; i++) { cout << "threadID:" << thread_id << "-" << d_vec_key_out[i] << "," << d_vec_value_out[i] << endl; } /* start_timer(&t); for(int i = 0; i < new_size; i++) { key_out[i] = d_vec_key_out[i]; value_out[i] = d_vec_value_out[i]; } cout << "thread:" << thread_id << " - transfer(rev) done with new_size " << new_size << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); */ start_timer(&t); thrust::host_vector<unsigned long long> h_vec_key_2(data_size); thrust::host_vector<long> h_vec_value_2(data_size); thrust::copy(d_vec_value_out.begin(),d_vec_value_out.end(),h_vec_value_2.begin()); thrust::copy(d_vec_key_out.begin(),d_vec_key_out.end(),h_vec_key_2.begin()); for(int i = 0; i < new_size_r; i++) { key_out[i] = h_vec_key_2[i]; value_out[i] = h_vec_value_2[i]; } cout << "thread:" << thread_id << " - transfer(rev) done with new_size " << new_size_r << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); (*new_size) = new_size_r; /* for(int i = 0; i < 5; i++) { cout << "threadID:" << thread_id << "-" << h_vec_key_2[i] << "," << h_vec_value_2[i] << endl; } */ /* start_timer(&t); hipSetDevice(thread_id); hipMalloc((unsigned long long**)&d_A, kBytes); hipMalloc((long**)&d_B, vBytes); hipMemcpy(d_A, key, kBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, value, vBytes, hipMemcpyHostToDevice); cout << "thread:" << thread_id << " - transfer done." << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); */ }
1941937b61b3611287a7559f8c934aea7ebcdf77.cu
#include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <vector> #include <boost/tokenizer.hpp> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <random> #include "timer.h" using namespace std; void transfer(unsigned long long *key, long *value, unsigned long long *key_out, long *value_out, int kBytes, int vBytes, size_t data_size, int *new_size, int thread_id) { // unsigned long long *d_A; // long *d_B; unsigned int t, travdirtime; int GPU_number = 0; thrust::host_vector<unsigned long long> h_vec_key(data_size); thrust::host_vector<unsigned long long> h_vec_value(data_size); for(int i=0; i < data_size; i++) { h_vec_key[i] = key[i]; h_vec_value[i] = value[i]; } start_timer(&t); GPU_number = thread_id - 1; cudaSetDevice(GPU_number); thrust::device_vector<unsigned long long> d_vec_key(data_size); thrust::device_vector<long> d_vec_value(data_size); thrust::copy(h_vec_key.begin(), h_vec_key.end(), d_vec_key.begin()); thrust::copy(h_vec_value.begin(), h_vec_value.end(), d_vec_value.begin()); cout << "thread:" << thread_id << " - transfer done." << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); /* reduction */ start_timer(&t); thrust::sort_by_key(d_vec_key.begin(), d_vec_key.end(), d_vec_value.begin()); thrust::device_vector<unsigned long long> d_vec_key_out(data_size); thrust::device_vector<long> d_vec_value_out(data_size); auto new_end = thrust::reduce_by_key(d_vec_key.begin(), d_vec_key.end(), d_vec_value.begin(), d_vec_key_out.begin(), d_vec_value_out.begin()); int new_size_r = new_end.first - d_vec_key_out.begin(); cout << "thread:" << thread_id << " - reduction done." << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); for(int i = 0; i < 5; i++) { cout << "threadID:" << thread_id << "-" << d_vec_key_out[i] << "," << d_vec_value_out[i] << endl; } /* start_timer(&t); for(int i = 0; i < new_size; i++) { key_out[i] = d_vec_key_out[i]; value_out[i] = d_vec_value_out[i]; } cout << "thread:" << thread_id << " - transfer(rev) done with new_size " << new_size << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); */ start_timer(&t); thrust::host_vector<unsigned long long> h_vec_key_2(data_size); thrust::host_vector<long> h_vec_value_2(data_size); thrust::copy(d_vec_value_out.begin(),d_vec_value_out.end(),h_vec_value_2.begin()); thrust::copy(d_vec_key_out.begin(),d_vec_key_out.end(),h_vec_key_2.begin()); for(int i = 0; i < new_size_r; i++) { key_out[i] = h_vec_key_2[i]; value_out[i] = h_vec_value_2[i]; } cout << "thread:" << thread_id << " - transfer(rev) done with new_size " << new_size_r << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); (*new_size) = new_size_r; /* for(int i = 0; i < 5; i++) { cout << "threadID:" << thread_id << "-" << h_vec_key_2[i] << "," << h_vec_value_2[i] << endl; } */ /* start_timer(&t); cudaSetDevice(thread_id); cudaMalloc((unsigned long long**)&d_A, kBytes); cudaMalloc((long**)&d_B, vBytes); cudaMemcpy(d_A, key, kBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, value, vBytes, cudaMemcpyHostToDevice); cout << "thread:" << thread_id << " - transfer done." << endl; travdirtime = stop_timer(&t); print_timer(travdirtime); */ }
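// At its core, the transfer() routine above is a sort_by_key followed by a
// reduce_by_key on device vectors. Below is a minimal sketch of that pattern on
// a tiny hand-made key/value set (the data and function name are illustrative
// only):
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <iostream>

int reduce_by_key_demo() {
    const int n = 5;
    unsigned long long h_keys[n] = {3, 1, 3, 2, 1};
    long h_vals[n] = {10, 1, 20, 5, 2};

    thrust::device_vector<unsigned long long> keys(h_keys, h_keys + n);
    thrust::device_vector<long> vals(h_vals, h_vals + n);

    // reduce_by_key only merges *adjacent* equal keys, hence the sort first.
    thrust::sort_by_key(keys.begin(), keys.end(), vals.begin());

    thrust::device_vector<unsigned long long> keys_out(n);
    thrust::device_vector<long> vals_out(n);
    auto ends = thrust::reduce_by_key(keys.begin(), keys.end(), vals.begin(),
        keys_out.begin(), vals_out.begin());

    // new_size plays the same role as new_size_r above: the number of distinct
    // keys (here 3, with per-key sums 1->3, 2->5, 3->30).
    int new_size = ends.first - keys_out.begin();
    for (int i = 0; i < new_size; i++) {
        std::cout << keys_out[i] << "," << vals_out[i] << std::endl;
    }
    return new_size;
}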
085e86214221df17ed74d088169d79238e23c2fd.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "rgb_to_yuv_convert_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "../rgb_to_yuv_convert_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #define w_r 0.299F #define w_b 0.114F #define w_g (1.0F - w_r - w_b) #define u_max 0.436F #define v_max 0.615F #define u_mult (u_max / (1.0F - w_b)) #define v_mult (v_max / (1.0F - w_r)) #define reverse_r_v_mult ((1.0F - w_r) / v_max) #define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g)) #define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g)) #define reverse_b_u_mult ((1.0F - w_b) / u_max) namespace nnforge { namespace cuda { __global__ void rgb_to_yuv_convert_upd_kernel( const float * __restrict input, float * __restrict output, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float red = input[red_and_y_offset]; float green = input[green_and_u_offset]; float blue = input[blue_and_v_offset]; float y = w_r * red + w_g * green + w_b * blue; float u = u_mult * (blue - y); float v = v_mult * (red - y); output[red_and_y_offset] = y; output[green_and_u_offset] = u; output[blue_and_v_offset] = v; } } __global__ void rgb_to_yuv_convert_deriviative_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, bool add_update_to_destination, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && 
(color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float y = output_errors[red_and_y_offset]; float u = output_errors[green_and_u_offset]; float v = output_errors[blue_and_v_offset]; float red = y + reverse_r_v_mult * v; float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v; float blue = y + reverse_b_u_mult * u; if (add_update_to_destination) { input_errors[red_and_y_offset] += red; input_errors[green_and_u_offset] += green; input_errors[blue_and_v_offset] += blue; } else { input_errors[red_and_y_offset] = red; input_errors[green_and_u_offset] = green; input_errors[blue_and_v_offset] = blue; } } } rgb_to_yuv_convert_layer_updater_cuda::rgb_to_yuv_convert_layer_updater_cuda() { } rgb_to_yuv_convert_layer_updater_cuda::~rgb_to_yuv_convert_layer_updater_cuda() { } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { if ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) && ((const float *)*output_buffer != (const float *)*input_buffers[1])) { cuda_util::copy_buffer( *cuda_config, *input_buffers[0], *output_buffer, output_elem_count_per_entry * entry_count, stream_id); } std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, color_feature_map_config_count, entry_count); hipLaunchKernelGGL(( rgb_to_yuv_convert_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_buffers[0], *output_buffer, *schema_data[0], output_configuration_specific.feature_map_count, output_elem_count_per_feature_map, color_feature_map_config_count, entry_count); } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backward_data_propagation( hipStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const 
std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { if (((const float *)*output_errors_buffer != (const float *)*input_errors_buffer) && ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) || add_update_to_destination)) { cuda_util::copy_buffer( *cuda_config, *output_errors_buffer, *input_errors_buffer, output_elem_count_per_entry * entry_count, stream_id); } std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, color_feature_map_config_count, entry_count); hipLaunchKernelGGL(( rgb_to_yuv_convert_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *output_errors_buffer, *schema_data[0], output_configuration_specific.feature_map_count, output_elem_count_per_feature_map, color_feature_map_config_count, add_update_to_destination, entry_count); } int rgb_to_yuv_convert_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const { return 0; } bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return false; } bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } void rgb_to_yuv_convert_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema); color_feature_map_config_count = static_cast<int>(layer_derived->color_feature_map_config_list.size()); } } }
085e86214221df17ed74d088169d79238e23c2fd.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "rgb_to_yuv_convert_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "../rgb_to_yuv_convert_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #define w_r 0.299F #define w_b 0.114F #define w_g (1.0F - w_r - w_b) #define u_max 0.436F #define v_max 0.615F #define u_mult (u_max / (1.0F - w_b)) #define v_mult (v_max / (1.0F - w_r)) #define reverse_r_v_mult ((1.0F - w_r) / v_max) #define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g)) #define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g)) #define reverse_b_u_mult ((1.0F - w_b) / u_max) namespace nnforge { namespace cuda { __global__ void rgb_to_yuv_convert_upd_kernel( const float * __restrict input, float * __restrict output, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float red = input[red_and_y_offset]; float green = input[green_and_u_offset]; float blue = input[blue_and_v_offset]; float y = w_r * red + w_g * green + w_b * blue; float u = u_mult * (blue - y); float v = v_mult * (red - y); output[red_and_y_offset] = y; output[green_and_u_offset] = u; output[blue_and_v_offset] = v; } } __global__ void rgb_to_yuv_convert_deriviative_upd_kernel( float * __restrict input_errors, const float * __restrict output_errors, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, bool add_update_to_destination, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < 
color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float y = output_errors[red_and_y_offset]; float u = output_errors[green_and_u_offset]; float v = output_errors[blue_and_v_offset]; float red = y + reverse_r_v_mult * v; float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v; float blue = y + reverse_b_u_mult * u; if (add_update_to_destination) { input_errors[red_and_y_offset] += red; input_errors[green_and_u_offset] += green; input_errors[blue_and_v_offset] += blue; } else { input_errors[red_and_y_offset] = red; input_errors[green_and_u_offset] = green; input_errors[blue_and_v_offset] = blue; } } } rgb_to_yuv_convert_layer_updater_cuda::rgb_to_yuv_convert_layer_updater_cuda() { } rgb_to_yuv_convert_layer_updater_cuda::~rgb_to_yuv_convert_layer_updater_cuda() { } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { if ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) && ((const float *)*output_buffer != (const float *)*input_buffers[1])) { cuda_util::copy_buffer( *cuda_config, *input_buffers[0], *output_buffer, output_elem_count_per_entry * entry_count, stream_id); } std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, color_feature_map_config_count, entry_count); rgb_to_yuv_convert_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_buffers[0], *output_buffer, *schema_data[0], output_configuration_specific.feature_map_count, output_elem_count_per_feature_map, color_feature_map_config_count, entry_count); } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backward_data_propagation( cudaStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const 
std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { if (((const float *)*output_errors_buffer != (const float *)*input_errors_buffer) && ((color_feature_map_config_count != output_configuration_specific.feature_map_count * 3) || add_update_to_destination)) { cuda_util::copy_buffer( *cuda_config, *output_errors_buffer, *input_errors_buffer, output_elem_count_per_entry * entry_count, stream_id); } std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, color_feature_map_config_count, entry_count); rgb_to_yuv_convert_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *output_errors_buffer, *schema_data[0], output_configuration_specific.feature_map_count, output_elem_count_per_feature_map, color_feature_map_config_count, add_update_to_destination, entry_count); } int rgb_to_yuv_convert_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const { return 0; } bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const { return false; } bool rgb_to_yuv_convert_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } void rgb_to_yuv_convert_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema); color_feature_map_config_count = static_cast<int>(layer_derived->color_feature_map_config_list.size()); } } }
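// The forward kernel above and the derivative kernel's reverse_* constants form
// an exact inverse pair (RGB -> Y'UV -> RGB). Below is a host-only sketch that
// checks the round trip numerically for one sample colour; the function name
// and sample values are illustrative and nothing here touches the GPU.
#include <cstdio>

void rgb_yuv_roundtrip_check() {
    const float w_r = 0.299f, w_b = 0.114f, w_g = 1.0f - w_r - w_b;
    const float u_max = 0.436f, v_max = 0.615f;

    const float r = 0.25f, g = 0.5f, b = 0.75f;

    // Forward transform, as in rgb_to_yuv_convert_upd_kernel.
    const float y = w_r * r + w_g * g + w_b * b;
    const float u = (u_max / (1.0f - w_b)) * (b - y);
    const float v = (v_max / (1.0f - w_r)) * (r - y);

    // Reverse transform, using the reverse_* coefficients from the
    // derivative kernel.
    const float r2 = y + ((1.0f - w_r) / v_max) * v;
    const float g2 = y - (w_b * (1.0f - w_b)) / (u_max * w_g) * u
                       - (w_r * (1.0f - w_r)) / (v_max * w_g) * v;
    const float b2 = y + ((1.0f - w_b) / u_max) * u;

    // All three differences should be ~0 up to float rounding.
    std::printf("dr=%g dg=%g db=%g\n", r - r2, g - g2, b - b2);
}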
a7830c1ab0438ed3302ac8ba8f0b1117cfa7207a.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017-2020 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/reduce.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <cmath> #include <memory> #include <limits> #include <utility> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "xgboost/json.h" #include "../common/io.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/bitfield.h" #include "../common/timer.h" #include "../common/categorical.h" #include "../data/ellpack_page.cuh" #include "param.h" #include "updater_gpu_common.cuh" #include "split_evaluator.h" #include "constraints.cuh" #include "gpu_hist/feature_groups.cuh" #include "gpu_hist/gradient_based_sampler.cuh" #include "gpu_hist/row_partitioner.cuh" #include "gpu_hist/histogram.cuh" #include "gpu_hist/evaluate_splits.cuh" #include "gpu_hist/driver.cuh" namespace xgboost { namespace tree { #if !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); #endif // !defined(GTEST_TEST) // training parameters specific to this algorithm struct GPUHistMakerTrainParam : public XGBoostParameter<GPUHistMakerTrainParam> { bool single_precision_histogram; bool deterministic_histogram; bool debug_synchronize; // declare parameters DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) { DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe( "Use single precision to build histograms."); DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe( "Pre-round the gradient for obtaining deterministic gradient histogram."); DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe( "Check if all distributed tree are identical after tree construction."); } }; #if !defined(GTEST_TEST) DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam); #endif // !defined(GTEST_TEST) /** * \struct DeviceHistogram * * \summary Data storage for node histograms on device. Automatically expands. * * \tparam GradientSumT histogram entry type. * \tparam kStopGrowingSize Do not grow beyond this size * * \author Rory * \date 28/07/2018 */ template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26> class DeviceHistogram { private: /*! \brief Map nidx to starting index of its histogram. 
*/ std::map<int, size_t> nidx_map_; dh::device_vector<typename GradientSumT::ValueT> data_; int n_bins_; int device_id_; static constexpr size_t kNumItemsInGradientSum = sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT); static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2."); public: void Init(int device_id, int n_bins) { this->n_bins_ = n_bins; this->device_id_ = device_id; } void Reset() { auto d_data = data_.data().get(); dh::LaunchN(device_id_, data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; }); nidx_map_.clear(); } bool HistogramExists(int nidx) const { return nidx_map_.find(nidx) != nidx_map_.cend(); } int Bins() const { return n_bins_; } size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; } dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; } void AllocateHistogram(int nidx) { if (HistogramExists(nidx)) return; // Number of items currently used in data const size_t used_size = nidx_map_.size() * HistogramSize(); const size_t new_used_size = used_size + HistogramSize(); if (data_.size() >= kStopGrowingSize) { // Recycle histogram memory if (new_used_size <= data_.size()) { // no need to remove old node, just insert the new one. nidx_map_[nidx] = used_size; // memset histogram size in bytes } else { std::pair<int, size_t> old_entry = *nidx_map_.begin(); nidx_map_.erase(old_entry.first); nidx_map_[nidx] = old_entry.second; } // Zero recycled memory auto d_data = data_.data().get() + nidx_map_[nidx]; dh::LaunchN(device_id_, n_bins_ * 2, [=] __device__(size_t idx) { d_data[idx] = 0.0f; }); } else { // Append new node histogram nidx_map_[nidx] = used_size; // Check there is enough memory for another histogram node if (data_.size() < new_used_size + HistogramSize()) { size_t new_required_memory = ::max(data_.size() * 2, HistogramSize()); data_.resize(new_required_memory); } } CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize()); } /** * \summary Return pointer to histogram memory for a given node. * \param nidx Tree node index. * \return hist pointer. */ common::Span<GradientSumT> GetNodeHistogram(int nidx) { CHECK(this->HistogramExists(nidx)); auto ptr = data_.data().get() + nidx_map_[nidx]; return common::Span<GradientSumT>( reinterpret_cast<GradientSumT*>(ptr), n_bins_); } }; // Manage memory for a single GPU template <typename GradientSumT> struct GPUHistMakerDevice { int device_id; EllpackPageImpl* page; common::Span<FeatureType const> feature_types; BatchParam batch_param; std::unique_ptr<RowPartitioner> row_partitioner; DeviceHistogram<GradientSumT> hist{}; common::Span<GradientPair> gpair; dh::caching_device_vector<int> monotone_constraints; /*! \brief Sum gradient for each node. */ std::vector<GradientPair> node_sum_gradients; TrainParam param; bool deterministic_histogram; GradientSumT histogram_rounding; dh::PinnedMemory pinned; std::vector<hipStream_t> streams{}; common::Monitor monitor; TreeEvaluator tree_evaluator; common::ColumnSampler column_sampler; FeatureInteractionConstraintDevice interaction_constraints; std::unique_ptr<GradientBasedSampler> sampler; std::unique_ptr<FeatureGroups> feature_groups; // Storing split categories for last node. 
dh::caching_device_vector<uint32_t> node_categories; GPUHistMakerDevice(int _device_id, EllpackPageImpl* _page, common::Span<FeatureType const> _feature_types, bst_uint _n_rows, TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features, bool deterministic_histogram, BatchParam _batch_param) : device_id(_device_id), page(_page), feature_types{_feature_types}, param(std::move(_param)), tree_evaluator(param, n_features, _device_id), column_sampler(column_sampler_seed), interaction_constraints(param, n_features), deterministic_histogram{deterministic_histogram}, batch_param(_batch_param) { sampler.reset(new GradientBasedSampler( page, _n_rows, batch_param, param.subsample, param.sampling_method)); if (!param.monotone_constraints.empty()) { // Copy assigning an empty vector causes an exception in MSVC debug builds monotone_constraints = param.monotone_constraints; } node_sum_gradients.resize(param.MaxNodes()); // Init histogram hist.Init(device_id, page->Cuts().TotalBins()); monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id)); feature_groups.reset(new FeatureGroups( page->Cuts(), page->is_dense, dh::MaxSharedMemoryOptin(device_id), sizeof(GradientSumT))); } ~GPUHistMakerDevice() { // NOLINT dh::safe_cuda(hipSetDevice(device_id)); for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } } // Get vector of at least n initialised streams std::vector<hipStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(hipStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration // Note that the column sampler must be passed by value because it is not // thread safe void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) { auto const& info = dmat->Info(); this->column_sampler.Init(num_columns, info.feature_weigths.HostVector(), param.colsample_bynode, param.colsample_bylevel, param.colsample_bytree); dh::safe_cuda(hipSetDevice(device_id)); tree_evaluator = TreeEvaluator(param, dmat->Info().num_col_, device_id); this->interaction_constraints.Reset(); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPair()); auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat); page = sample.page; gpair = sample.gpair; if (deterministic_histogram) { histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair); } else { histogram_rounding = GradientSumT{0.0, 0.0}; } row_partitioner.reset(); // Release the device memory first before reallocating row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows)); hist.Reset(); } DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) { int nidx = RegTree::kRoot; dh::TemporaryArray<DeviceSplitCandidate> splits_out(1); GPUTrainingParam gpu_param(param); auto sampled_features = column_sampler.GetFeatureSet(0); sampled_features->SetDevice(device_id); common::Span<bst_feature_t> feature_set = interaction_constraints.Query(sampled_features->DeviceSpan(), nidx); auto matrix = page->GetDeviceAccessor(device_id); EvaluateSplitInputs<GradientSumT> inputs{ nidx, {root_sum.GetGrad(), root_sum.GetHess()}, gpu_param, feature_set, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map, matrix.min_fvalue, hist.GetNodeHistogram(nidx)}; auto gain_calc = tree_evaluator.GetEvaluator<GPUTrainingParam>(); EvaluateSingleSplit(dh::ToSpan(splits_out), gain_calc, inputs); 
std::vector<DeviceSplitCandidate> result(1); dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(), sizeof(DeviceSplitCandidate) * splits_out.size(), hipMemcpyDeviceToHost)); return result.front(); } void EvaluateLeftRightSplits( ExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree, common::Span<ExpandEntry> pinned_candidates_out) { dh::TemporaryArray<DeviceSplitCandidate> splits_out(2); GPUTrainingParam gpu_param(param); auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx)); left_sampled_features->SetDevice(device_id); common::Span<bst_feature_t> left_feature_set = interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx); auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx)); right_sampled_features->SetDevice(device_id); common::Span<bst_feature_t> right_feature_set = interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx); auto matrix = page->GetDeviceAccessor(device_id); EvaluateSplitInputs<GradientSumT> left{ left_nidx, {candidate.split.left_sum.GetGrad(), candidate.split.left_sum.GetHess()}, gpu_param, left_feature_set, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map, matrix.min_fvalue, hist.GetNodeHistogram(left_nidx)}; EvaluateSplitInputs<GradientSumT> right{ right_nidx, {candidate.split.right_sum.GetGrad(), candidate.split.right_sum.GetHess()}, gpu_param, right_feature_set, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map, matrix.min_fvalue, hist.GetNodeHistogram(right_nidx)}; auto d_splits_out = dh::ToSpan(splits_out); EvaluateSplits(d_splits_out, tree_evaluator.GetEvaluator<GPUTrainingParam>(), left, right); dh::TemporaryArray<ExpandEntry> entries(2); auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>(); auto d_entries = entries.data().get(); dh::LaunchN(device_id, 2, [=] __device__(size_t idx) { auto split = d_splits_out[idx]; auto nidx = idx == 0 ? 
left_nidx : right_nidx; float base_weight = evaluator.CalcWeight( nidx, gpu_param, GradStats{split.left_sum + split.right_sum}); float left_weight = evaluator.CalcWeight(nidx, gpu_param, GradStats{split.left_sum}); float right_weight = evaluator.CalcWeight( nidx, gpu_param, GradStats{split.right_sum}); d_entries[idx] = ExpandEntry{nidx, candidate.depth + 1, d_splits_out[idx], base_weight, left_weight, right_weight}; }); dh::safe_cuda(hipMemcpyAsync( pinned_candidates_out.data(), entries.data().get(), sizeof(ExpandEntry) * entries.size(), hipMemcpyDeviceToHost)); } void BuildHist(int nidx) { hist.AllocateHistogram(nidx); auto d_node_hist = hist.GetNodeHistogram(nidx); auto d_ridx = row_partitioner->GetRows(nidx); BuildGradientHistogram(page->GetDeviceAccessor(device_id), feature_groups->DeviceAccessor(device_id), gpair, d_ridx, d_node_hist, histogram_rounding); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent); auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram); auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction); dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { // Make sure histograms are already allocated hist.AllocateHistogram(nidx_subtraction); return hist.HistogramExists(nidx_histogram) && hist.HistogramExists(nidx_parent); } void UpdatePosition(int nidx, RegTree* p_tree) { RegTree::Node split_node = (*p_tree)[nidx]; auto split_type = p_tree->NodeSplitType(nidx); auto d_matrix = page->GetDeviceAccessor(device_id); auto node_cats = dh::ToSpan(node_categories); row_partitioner->UpdatePosition( nidx, split_node.LeftChild(), split_node.RightChild(), [=] __device__(bst_uint ridx) { // given a row index, returns the node id it belongs to bst_float cut_value = d_matrix.GetFvalue(ridx, split_node.SplitIndex()); // Missing value bst_node_t new_position = 0; if (isnan(cut_value)) { new_position = split_node.DefaultChild(); } else { bool go_left = true; if (split_type == FeatureType::kCategorical) { go_left = common::Decision(node_cats, common::AsCat(cut_value)); } else { go_left = cut_value <= split_node.SplitCond(); } if (go_left) { new_position = split_node.LeftChild(); } else { new_position = split_node.RightChild(); } } return new_position; }); } // After tree update is finished, update the position of all training // instances to their final leaf. 
This information is used later to update the // prediction cache void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) { dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size()); dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(), d_nodes.size() * sizeof(RegTree::Node), hipMemcpyHostToDevice)); auto const& h_split_types = p_tree->GetSplitTypes(); auto const& categories = p_tree->GetSplitCategories(); auto const& categories_segments = p_tree->GetSplitCategoriesPtr(); dh::caching_device_vector<FeatureType> d_split_types; dh::caching_device_vector<uint32_t> d_categories; dh::caching_device_vector<RegTree::Segment> d_categories_segments; if (!categories.empty()) { dh::CopyToD(h_split_types, &d_split_types); dh::CopyToD(categories, &d_categories); dh::CopyToD(categories_segments, &d_categories_segments); } if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) { row_partitioner.reset(); // Release the device memory first before reallocating row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_)); } if (page->n_rows == p_fmat->Info().num_row_) { FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types), dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments)); } else { for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) { FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes), dh::ToSpan(d_split_types), dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments)); } } } void FinalisePositionInPage(EllpackPageImpl *page, const common::Span<RegTree::Node> d_nodes, common::Span<FeatureType const> d_feature_types, common::Span<uint32_t const> categories, common::Span<RegTree::Segment> categories_segments) { auto d_matrix = page->GetDeviceAccessor(device_id); row_partitioner->FinalisePosition( [=] __device__(size_t row_id, int position) { // What happens if user prune the tree? 
if (!d_matrix.IsInRange(row_id)) { return RowPartitioner::kIgnoredTreePosition; } auto node = d_nodes[position]; while (!node.IsLeaf()) { bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex()); // Missing value if (isnan(element)) { position = node.DefaultChild(); } else { bool go_left = true; if (common::IsCat(d_feature_types, position)) { auto node_cats = categories.subspan(categories_segments[position].beg, categories_segments[position].size); go_left = common::Decision(node_cats, common::AsCat(element)); } else { go_left = element <= node.SplitCond(); } if (go_left) { position = node.LeftChild(); } else { position = node.RightChild(); } } node = d_nodes[position]; } return position; }); } void UpdatePredictionCache(common::Span<bst_float> out_preds_d) { dh::safe_cuda(hipSetDevice(device_id)); auto d_ridx = row_partitioner->GetRows(); GPUTrainingParam param_d(param); dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size()); dh::safe_cuda( hipMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(), sizeof(GradientPair) * node_sum_gradients.size(), hipMemcpyHostToDevice)); auto d_position = row_partitioner->GetPosition(); auto d_node_sum_gradients = device_node_sum_gradients.data().get(); auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>(); dh::LaunchN( device_id, out_preds_d.size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; bst_float weight = evaluator.CalcWeight(pos, param_d, GradStats{d_node_sum_gradients[pos]}); out_preds_d[d_ridx[local_idx]] += weight * param_d.learning_rate; }); row_partitioner.reset(); } void AllReduceHist(int nidx, dh::AllReducer* reducer) { monitor.Start("AllReduce"); auto d_node_hist = hist.GetNodeHistogram(nidx).data(); reducer->AllReduceSum( reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT))); monitor.Stop("AllReduce"); } /** * \brief Build GPU local histograms for the left and right child of some parent node */ void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left, int nidx_right, dh::AllReducer* reducer) { auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; // Decide whether to build the left histogram or right histogram // Use sum of Hessian as a heuristic to select node with fewest training instances bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess(); if (fewer_right) { std::swap(build_hist_nidx, subtraction_trick_nidx); } this->BuildHist(build_hist_nidx); this->AllReduceHist(build_hist_nidx, reducer); // Check whether we can use the subtraction trick to calculate the other bool do_subtraction_trick = this->CanDoSubtractionTrick( candidate.nid, build_hist_nidx, subtraction_trick_nidx); if (do_subtraction_trick) { // Calculate other histogram using subtraction trick this->SubtractionTrick(candidate.nid, build_hist_nidx, subtraction_trick_nidx); } else { // Calculate other histogram manually this->BuildHist(subtraction_trick_nidx); this->AllReduceHist(subtraction_trick_nidx, reducer); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { RegTree& tree = *p_tree; auto evaluator = tree_evaluator.GetEvaluator(); auto parent_sum = candidate.split.left_sum + candidate.split.right_sum; auto base_weight = candidate.base_weight; auto left_weight = candidate.left_weight * param.learning_rate; auto right_weight = 
candidate.right_weight * param.learning_rate; auto is_cat = candidate.split.is_cat; if (is_cat) { auto cat = common::AsCat(candidate.split.fvalue); std::vector<uint32_t> split_cats(LBitField32::ComputeStorageSize(::max(cat+1, 1)), 0); LBitField32 cats_bits(split_cats); cats_bits.Set(cat); dh::CopyToD(split_cats, &node_categories); tree.ExpandCategorical( candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(), candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess()); } else { tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue, candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(), candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess()); } // Set up child constraints auto left_child = tree[candidate.nid].LeftChild(); auto right_child = tree[candidate.nid].RightChild(); tree_evaluator.AddSplit(candidate.nid, left_child, right_child, tree[candidate.nid].SplitIndex(), candidate.left_weight, candidate.right_weight); node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum; node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum; interaction_constraints.Split( candidate.nid, tree[candidate.nid].SplitIndex(), tree[candidate.nid].LeftChild(), tree[candidate.nid].RightChild()); } ExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) { constexpr bst_node_t kRootNIdx = 0; dh::XGBCachingDeviceAllocator<char> alloc; GradientPair root_sum = dh::Reduce( thrust::hip::par(alloc), thrust::device_ptr<GradientPair const>(gpair.data()), thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()), GradientPair{}, thrust::plus<GradientPair>{}); rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum), 2); this->BuildHist(kRootNIdx); this->AllReduceHist(kRootNIdx, reducer); // Remember root stats node_sum_gradients[kRootNIdx] = root_sum; p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess(); auto weight = CalcWeight(param, root_sum); p_tree->Stat(kRootNIdx).base_weight = weight; (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight); // Generate first split auto split = this->EvaluateRootSplit(root_sum); dh::TemporaryArray<ExpandEntry> entries(1); auto d_entries = entries.data().get(); auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>(); GPUTrainingParam gpu_param(param); auto depth = p_tree->GetDepth(kRootNIdx); dh::LaunchN(device_id, 1, [=] __device__(size_t idx) { float left_weight = evaluator.CalcWeight(kRootNIdx, gpu_param, GradStats{split.left_sum}); float right_weight = evaluator.CalcWeight( kRootNIdx, gpu_param, GradStats{split.right_sum}); d_entries[0] = ExpandEntry(kRootNIdx, depth, split, weight, left_weight, right_weight); }); ExpandEntry root_entry; dh::safe_cuda(hipMemcpyAsync( &root_entry, entries.data().get(), sizeof(ExpandEntry) * entries.size(), hipMemcpyDeviceToHost)); return root_entry; } void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, RegTree* p_tree, dh::AllReducer* reducer) { auto& tree = *p_tree; Driver driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy)); monitor.Start("Reset"); this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_); monitor.Stop("Reset"); monitor.Start("InitRoot"); driver.Push({ this->InitRoot(p_tree, reducer) }); monitor.Stop("InitRoot"); auto num_leaves = 1; // The set of leaves that can be 
expanded asynchronously auto expand_set = driver.Pop(); while (!expand_set.empty()) { auto new_candidates = pinned.GetSpan<ExpandEntry>(expand_set.size() * 2, ExpandEntry()); for (auto i = 0ull; i < expand_set.size(); i++) { auto candidate = expand_set.at(i); if (!candidate.IsValid(param, num_leaves)) { continue; } this->ApplySplit(candidate, p_tree); num_leaves++; int left_child_nidx = tree[candidate.nid].LeftChild(); int right_child_nidx = tree[candidate.nid].RightChild(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.Start("UpdatePosition"); this->UpdatePosition(candidate.nid, p_tree); monitor.Stop("UpdatePosition"); monitor.Start("BuildHist"); this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer); monitor.Stop("BuildHist"); monitor.Start("EvaluateSplits"); this->EvaluateLeftRightSplits(candidate, left_child_nidx, right_child_nidx, *p_tree, new_candidates.subspan(i * 2, 2)); monitor.Stop("EvaluateSplits"); } else { // Set default new_candidates[i * 2] = ExpandEntry(); new_candidates[i * 2 + 1] = ExpandEntry(); } } dh::safe_cuda(hipDeviceSynchronize()); driver.Push(new_candidates.begin(), new_candidates.end()); expand_set = driver.Pop(); } monitor.Start("FinalisePosition"); this->FinalisePosition(p_tree, p_fmat); monitor.Stop("FinalisePosition"); } }; template <typename GradientSumT> class GPUHistMakerSpecialised { public: GPUHistMakerSpecialised() = default; void Configure(const Args& args, GenericParameter const* generic_param) { param_.UpdateAllowUnknown(args); generic_param_ = generic_param; hist_maker_param_.UpdateAllowUnknown(args); dh::CheckComputeCapability(); monitor_.Init("updater_gpu_hist"); } ~GPUHistMakerSpecialised() { // NOLINT dh::GlobalMemoryLogger().Log(); } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) { monitor_.Start("Update"); // rescale learning rate according to size of trees float lr = param_.learning_rate; param_.learning_rate = lr / trees.size(); // build tree try { for (xgboost::RegTree* tree : trees) { this->UpdateTree(gpair, dmat, tree); if (hist_maker_param_.debug_synchronize) { this->CheckTreesSynchronized(tree); } } dh::safe_cuda(hipGetLastError()); } catch (const std::exception& e) { LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl; } param_.learning_rate = lr; monitor_.Stop("Update"); } void InitDataOnce(DMatrix* dmat) { device_ = generic_param_->gpu_id; CHECK_GE(device_, 0) << "Must have at least one device"; info_ = &dmat->Info(); reducer_.Init({device_}); // NOLINT // Synchronise the column sampling seed uint32_t column_sampling_seed = common::GlobalRandom()(); rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0); BatchParam batch_param{ device_, param_.max_bin, generic_param_->gpu_page_size }; auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl(); dh::safe_cuda(hipSetDevice(device_)); info_->feature_types.SetDevice(device_); maker.reset(new GPUHistMakerDevice<GradientSumT>(device_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_, column_sampling_seed, info_->num_col_, hist_maker_param_.deterministic_histogram, batch_param)); p_last_fmat_ = dmat; initialised_ = true; } void InitData(DMatrix* dmat) { if (!initialised_) { monitor_.Start("InitDataOnce"); this->InitDataOnce(dmat); monitor_.Stop("InitDataOnce"); } } // Only call this method for testing void CheckTreesSynchronized(RegTree* local_tree) const { 
std::string s_model; common::MemoryBufferStream fs(&s_model); int rank = rabit::GetRank(); if (rank == 0) { local_tree->Save(&fs); } fs.Seek(0); rabit::Broadcast(&s_model, 0); RegTree reference_tree {}; // rank 0 tree reference_tree.Load(&fs); CHECK(*local_tree == reference_tree); } void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree) { monitor_.Start("InitData"); this->InitData(p_fmat); monitor_.Stop("InitData"); gpair->SetDevice(device_); maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_); } bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) { if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) { return false; } monitor_.Start("UpdatePredictionCache"); p_out_preds->SetDevice(device_); maker->UpdatePredictionCache(p_out_preds->DeviceSpan()); monitor_.Stop("UpdatePredictionCache"); return true; } TrainParam param_; // NOLINT MetaInfo* info_{}; // NOLINT std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT private: bool initialised_ { false }; GPUHistMakerTrainParam hist_maker_param_; GenericParameter const* generic_param_; dh::AllReducer reducer_; DMatrix* p_last_fmat_ { nullptr }; int device_{-1}; common::Monitor monitor_; }; class GPUHistMaker : public TreeUpdater { public: void Configure(const Args& args) override { // Used in test to count how many configurations are performed LOG(DEBUG) << "[GPU Hist]: Configure"; hist_maker_param_.UpdateAllowUnknown(args); // The passed in args can be empty, if we simply purge the old maker without // preserving parameters then we can't do Update on it. TrainParam param; if (float_maker_) { param = float_maker_->param_; } else if (double_maker_) { param = double_maker_->param_; } if (hist_maker_param_.single_precision_histogram) { float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>()); float_maker_->param_ = param; float_maker_->Configure(args, tparam_); } else { double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>()); double_maker_->param_ = param; double_maker_->Configure(args, tparam_); } } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_); if (hist_maker_param_.single_precision_histogram) { float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>()); FromJson(config.at("train_param"), &float_maker_->param_); } else { double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>()); FromJson(config.at("train_param"), &double_maker_->param_); } } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["gpu_hist_train_param"] = ToJson(hist_maker_param_); if (hist_maker_param_.single_precision_histogram) { out["train_param"] = ToJson(float_maker_->param_); } else { out["train_param"] = ToJson(double_maker_->param_); } } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { if (hist_maker_param_.single_precision_histogram) { float_maker_->Update(gpair, dmat, trees); } else { double_maker_->Update(gpair, dmat, trees); } } bool UpdatePredictionCache( const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override { if (hist_maker_param_.single_precision_histogram) { return float_maker_->UpdatePredictionCache(data, p_out_preds); } else { return double_maker_->UpdatePredictionCache(data, p_out_preds); } } char const* Name() const override { return "grow_gpu_hist"; } private: GPUHistMakerTrainParam hist_maker_param_; 
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_; std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_; }; #if !defined(GTEST_TEST) XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); #endif // !defined(GTEST_TEST) } // namespace tree } // namespace xgboost
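// BuildHistLeftRight above exploits the identity hist(parent) = hist(left) +
// hist(right): it builds a full histogram only for the child with the smaller
// Hessian sum and derives the sibling bin by bin via SubtractionTrick.  The
// snippet below is a minimal standalone sketch of that idea under simplified
// assumptions; GradPair, subtraction_trick_kernel and SubtractionTrickSketch
// are hypothetical names and do not correspond to the actual GradientSumT or
// dh::LaunchN machinery used in the file above.
#include <hip/hip_runtime.h>

struct GradPair { float grad; float hess; };

__global__ void subtraction_trick_kernel(const GradPair* parent,
                                         const GradPair* built_child,
                                         GradPair* other_child,
                                         int n_bins) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n_bins) return;
  // Sibling histogram = parent histogram minus the histogram that was built.
  other_child[i].grad = parent[i].grad - built_child[i].grad;
  other_child[i].hess = parent[i].hess - built_child[i].hess;
}

// Host-side helper: derive the sibling histogram without a second pass over the data.
inline void SubtractionTrickSketch(const GradPair* d_parent, const GradPair* d_built,
                                   GradPair* d_other, int n_bins, hipStream_t stream = 0) {
  int block = 256;
  int grid  = (n_bins + block - 1) / block;
  hipLaunchKernelGGL(subtraction_trick_kernel, dim3(grid), dim3(block), 0, stream,
                     d_parent, d_built, d_other, n_bins);
}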
a7830c1ab0438ed3302ac8ba8f0b1117cfa7207a.cu
/*! * Copyright 2017-2020 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/reduce.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <cmath> #include <memory> #include <limits> #include <utility> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "xgboost/json.h" #include "../common/io.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/bitfield.h" #include "../common/timer.h" #include "../common/categorical.h" #include "../data/ellpack_page.cuh" #include "param.h" #include "updater_gpu_common.cuh" #include "split_evaluator.h" #include "constraints.cuh" #include "gpu_hist/feature_groups.cuh" #include "gpu_hist/gradient_based_sampler.cuh" #include "gpu_hist/row_partitioner.cuh" #include "gpu_hist/histogram.cuh" #include "gpu_hist/evaluate_splits.cuh" #include "gpu_hist/driver.cuh" namespace xgboost { namespace tree { #if !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); #endif // !defined(GTEST_TEST) // training parameters specific to this algorithm struct GPUHistMakerTrainParam : public XGBoostParameter<GPUHistMakerTrainParam> { bool single_precision_histogram; bool deterministic_histogram; bool debug_synchronize; // declare parameters DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) { DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe( "Use single precision to build histograms."); DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe( "Pre-round the gradient for obtaining deterministic gradient histogram."); DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe( "Check if all distributed tree are identical after tree construction."); } }; #if !defined(GTEST_TEST) DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam); #endif // !defined(GTEST_TEST) /** * \struct DeviceHistogram * * \summary Data storage for node histograms on device. Automatically expands. * * \tparam GradientSumT histogram entry type. * \tparam kStopGrowingSize Do not grow beyond this size * * \author Rory * \date 28/07/2018 */ template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26> class DeviceHistogram { private: /*! \brief Map nidx to starting index of its histogram. */ std::map<int, size_t> nidx_map_; dh::device_vector<typename GradientSumT::ValueT> data_; int n_bins_; int device_id_; static constexpr size_t kNumItemsInGradientSum = sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT); static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2."); public: void Init(int device_id, int n_bins) { this->n_bins_ = n_bins; this->device_id_ = device_id; } void Reset() { auto d_data = data_.data().get(); dh::LaunchN(device_id_, data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; }); nidx_map_.clear(); } bool HistogramExists(int nidx) const { return nidx_map_.find(nidx) != nidx_map_.cend(); } int Bins() const { return n_bins_; } size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; } dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; } void AllocateHistogram(int nidx) { if (HistogramExists(nidx)) return; // Number of items currently used in data const size_t used_size = nidx_map_.size() * HistogramSize(); const size_t new_used_size = used_size + HistogramSize(); if (data_.size() >= kStopGrowingSize) { // Recycle histogram memory if (new_used_size <= data_.size()) { // no need to remove old node, just insert the new one. 
nidx_map_[nidx] = used_size; // memset histogram size in bytes } else { std::pair<int, size_t> old_entry = *nidx_map_.begin(); nidx_map_.erase(old_entry.first); nidx_map_[nidx] = old_entry.second; } // Zero recycled memory auto d_data = data_.data().get() + nidx_map_[nidx]; dh::LaunchN(device_id_, n_bins_ * 2, [=] __device__(size_t idx) { d_data[idx] = 0.0f; }); } else { // Append new node histogram nidx_map_[nidx] = used_size; // Check there is enough memory for another histogram node if (data_.size() < new_used_size + HistogramSize()) { size_t new_required_memory = std::max(data_.size() * 2, HistogramSize()); data_.resize(new_required_memory); } } CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize()); } /** * \summary Return pointer to histogram memory for a given node. * \param nidx Tree node index. * \return hist pointer. */ common::Span<GradientSumT> GetNodeHistogram(int nidx) { CHECK(this->HistogramExists(nidx)); auto ptr = data_.data().get() + nidx_map_[nidx]; return common::Span<GradientSumT>( reinterpret_cast<GradientSumT*>(ptr), n_bins_); } }; // Manage memory for a single GPU template <typename GradientSumT> struct GPUHistMakerDevice { int device_id; EllpackPageImpl* page; common::Span<FeatureType const> feature_types; BatchParam batch_param; std::unique_ptr<RowPartitioner> row_partitioner; DeviceHistogram<GradientSumT> hist{}; common::Span<GradientPair> gpair; dh::caching_device_vector<int> monotone_constraints; /*! \brief Sum gradient for each node. */ std::vector<GradientPair> node_sum_gradients; TrainParam param; bool deterministic_histogram; GradientSumT histogram_rounding; dh::PinnedMemory pinned; std::vector<cudaStream_t> streams{}; common::Monitor monitor; TreeEvaluator tree_evaluator; common::ColumnSampler column_sampler; FeatureInteractionConstraintDevice interaction_constraints; std::unique_ptr<GradientBasedSampler> sampler; std::unique_ptr<FeatureGroups> feature_groups; // Storing split categories for last node. 
dh::caching_device_vector<uint32_t> node_categories; GPUHistMakerDevice(int _device_id, EllpackPageImpl* _page, common::Span<FeatureType const> _feature_types, bst_uint _n_rows, TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features, bool deterministic_histogram, BatchParam _batch_param) : device_id(_device_id), page(_page), feature_types{_feature_types}, param(std::move(_param)), tree_evaluator(param, n_features, _device_id), column_sampler(column_sampler_seed), interaction_constraints(param, n_features), deterministic_histogram{deterministic_histogram}, batch_param(_batch_param) { sampler.reset(new GradientBasedSampler( page, _n_rows, batch_param, param.subsample, param.sampling_method)); if (!param.monotone_constraints.empty()) { // Copy assigning an empty vector causes an exception in MSVC debug builds monotone_constraints = param.monotone_constraints; } node_sum_gradients.resize(param.MaxNodes()); // Init histogram hist.Init(device_id, page->Cuts().TotalBins()); monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id)); feature_groups.reset(new FeatureGroups( page->Cuts(), page->is_dense, dh::MaxSharedMemoryOptin(device_id), sizeof(GradientSumT))); } ~GPUHistMakerDevice() { // NOLINT dh::safe_cuda(cudaSetDevice(device_id)); for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } } // Get vector of at least n initialised streams std::vector<cudaStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(cudaStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration // Note that the column sampler must be passed by value because it is not // thread safe void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) { auto const& info = dmat->Info(); this->column_sampler.Init(num_columns, info.feature_weigths.HostVector(), param.colsample_bynode, param.colsample_bylevel, param.colsample_bytree); dh::safe_cuda(cudaSetDevice(device_id)); tree_evaluator = TreeEvaluator(param, dmat->Info().num_col_, device_id); this->interaction_constraints.Reset(); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPair()); auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat); page = sample.page; gpair = sample.gpair; if (deterministic_histogram) { histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair); } else { histogram_rounding = GradientSumT{0.0, 0.0}; } row_partitioner.reset(); // Release the device memory first before reallocating row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows)); hist.Reset(); } DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) { int nidx = RegTree::kRoot; dh::TemporaryArray<DeviceSplitCandidate> splits_out(1); GPUTrainingParam gpu_param(param); auto sampled_features = column_sampler.GetFeatureSet(0); sampled_features->SetDevice(device_id); common::Span<bst_feature_t> feature_set = interaction_constraints.Query(sampled_features->DeviceSpan(), nidx); auto matrix = page->GetDeviceAccessor(device_id); EvaluateSplitInputs<GradientSumT> inputs{ nidx, {root_sum.GetGrad(), root_sum.GetHess()}, gpu_param, feature_set, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map, matrix.min_fvalue, hist.GetNodeHistogram(nidx)}; auto gain_calc = tree_evaluator.GetEvaluator<GPUTrainingParam>(); EvaluateSingleSplit(dh::ToSpan(splits_out), gain_calc, inputs); 
std::vector<DeviceSplitCandidate> result(1); dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(), sizeof(DeviceSplitCandidate) * splits_out.size(), cudaMemcpyDeviceToHost)); return result.front(); } void EvaluateLeftRightSplits( ExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree, common::Span<ExpandEntry> pinned_candidates_out) { dh::TemporaryArray<DeviceSplitCandidate> splits_out(2); GPUTrainingParam gpu_param(param); auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx)); left_sampled_features->SetDevice(device_id); common::Span<bst_feature_t> left_feature_set = interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx); auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx)); right_sampled_features->SetDevice(device_id); common::Span<bst_feature_t> right_feature_set = interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx); auto matrix = page->GetDeviceAccessor(device_id); EvaluateSplitInputs<GradientSumT> left{ left_nidx, {candidate.split.left_sum.GetGrad(), candidate.split.left_sum.GetHess()}, gpu_param, left_feature_set, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map, matrix.min_fvalue, hist.GetNodeHistogram(left_nidx)}; EvaluateSplitInputs<GradientSumT> right{ right_nidx, {candidate.split.right_sum.GetGrad(), candidate.split.right_sum.GetHess()}, gpu_param, right_feature_set, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map, matrix.min_fvalue, hist.GetNodeHistogram(right_nidx)}; auto d_splits_out = dh::ToSpan(splits_out); EvaluateSplits(d_splits_out, tree_evaluator.GetEvaluator<GPUTrainingParam>(), left, right); dh::TemporaryArray<ExpandEntry> entries(2); auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>(); auto d_entries = entries.data().get(); dh::LaunchN(device_id, 2, [=] __device__(size_t idx) { auto split = d_splits_out[idx]; auto nidx = idx == 0 ? 
left_nidx : right_nidx; float base_weight = evaluator.CalcWeight( nidx, gpu_param, GradStats{split.left_sum + split.right_sum}); float left_weight = evaluator.CalcWeight(nidx, gpu_param, GradStats{split.left_sum}); float right_weight = evaluator.CalcWeight( nidx, gpu_param, GradStats{split.right_sum}); d_entries[idx] = ExpandEntry{nidx, candidate.depth + 1, d_splits_out[idx], base_weight, left_weight, right_weight}; }); dh::safe_cuda(cudaMemcpyAsync( pinned_candidates_out.data(), entries.data().get(), sizeof(ExpandEntry) * entries.size(), cudaMemcpyDeviceToHost)); } void BuildHist(int nidx) { hist.AllocateHistogram(nidx); auto d_node_hist = hist.GetNodeHistogram(nidx); auto d_ridx = row_partitioner->GetRows(nidx); BuildGradientHistogram(page->GetDeviceAccessor(device_id), feature_groups->DeviceAccessor(device_id), gpair, d_ridx, d_node_hist, histogram_rounding); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent); auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram); auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction); dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { // Make sure histograms are already allocated hist.AllocateHistogram(nidx_subtraction); return hist.HistogramExists(nidx_histogram) && hist.HistogramExists(nidx_parent); } void UpdatePosition(int nidx, RegTree* p_tree) { RegTree::Node split_node = (*p_tree)[nidx]; auto split_type = p_tree->NodeSplitType(nidx); auto d_matrix = page->GetDeviceAccessor(device_id); auto node_cats = dh::ToSpan(node_categories); row_partitioner->UpdatePosition( nidx, split_node.LeftChild(), split_node.RightChild(), [=] __device__(bst_uint ridx) { // given a row index, returns the node id it belongs to bst_float cut_value = d_matrix.GetFvalue(ridx, split_node.SplitIndex()); // Missing value bst_node_t new_position = 0; if (isnan(cut_value)) { new_position = split_node.DefaultChild(); } else { bool go_left = true; if (split_type == FeatureType::kCategorical) { go_left = common::Decision(node_cats, common::AsCat(cut_value)); } else { go_left = cut_value <= split_node.SplitCond(); } if (go_left) { new_position = split_node.LeftChild(); } else { new_position = split_node.RightChild(); } } return new_position; }); } // After tree update is finished, update the position of all training // instances to their final leaf. 
This information is used later to update the // prediction cache void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) { dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size()); dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(), d_nodes.size() * sizeof(RegTree::Node), cudaMemcpyHostToDevice)); auto const& h_split_types = p_tree->GetSplitTypes(); auto const& categories = p_tree->GetSplitCategories(); auto const& categories_segments = p_tree->GetSplitCategoriesPtr(); dh::caching_device_vector<FeatureType> d_split_types; dh::caching_device_vector<uint32_t> d_categories; dh::caching_device_vector<RegTree::Segment> d_categories_segments; if (!categories.empty()) { dh::CopyToD(h_split_types, &d_split_types); dh::CopyToD(categories, &d_categories); dh::CopyToD(categories_segments, &d_categories_segments); } if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) { row_partitioner.reset(); // Release the device memory first before reallocating row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_)); } if (page->n_rows == p_fmat->Info().num_row_) { FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types), dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments)); } else { for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) { FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes), dh::ToSpan(d_split_types), dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments)); } } } void FinalisePositionInPage(EllpackPageImpl *page, const common::Span<RegTree::Node> d_nodes, common::Span<FeatureType const> d_feature_types, common::Span<uint32_t const> categories, common::Span<RegTree::Segment> categories_segments) { auto d_matrix = page->GetDeviceAccessor(device_id); row_partitioner->FinalisePosition( [=] __device__(size_t row_id, int position) { // What happens if user prune the tree? 
if (!d_matrix.IsInRange(row_id)) { return RowPartitioner::kIgnoredTreePosition; } auto node = d_nodes[position]; while (!node.IsLeaf()) { bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex()); // Missing value if (isnan(element)) { position = node.DefaultChild(); } else { bool go_left = true; if (common::IsCat(d_feature_types, position)) { auto node_cats = categories.subspan(categories_segments[position].beg, categories_segments[position].size); go_left = common::Decision(node_cats, common::AsCat(element)); } else { go_left = element <= node.SplitCond(); } if (go_left) { position = node.LeftChild(); } else { position = node.RightChild(); } } node = d_nodes[position]; } return position; }); } void UpdatePredictionCache(common::Span<bst_float> out_preds_d) { dh::safe_cuda(cudaSetDevice(device_id)); auto d_ridx = row_partitioner->GetRows(); GPUTrainingParam param_d(param); dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size()); dh::safe_cuda( cudaMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(), sizeof(GradientPair) * node_sum_gradients.size(), cudaMemcpyHostToDevice)); auto d_position = row_partitioner->GetPosition(); auto d_node_sum_gradients = device_node_sum_gradients.data().get(); auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>(); dh::LaunchN( device_id, out_preds_d.size(), [=] __device__(int local_idx) { int pos = d_position[local_idx]; bst_float weight = evaluator.CalcWeight(pos, param_d, GradStats{d_node_sum_gradients[pos]}); out_preds_d[d_ridx[local_idx]] += weight * param_d.learning_rate; }); row_partitioner.reset(); } void AllReduceHist(int nidx, dh::AllReducer* reducer) { monitor.Start("AllReduce"); auto d_node_hist = hist.GetNodeHistogram(nidx).data(); reducer->AllReduceSum( reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist), page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT))); monitor.Stop("AllReduce"); } /** * \brief Build GPU local histograms for the left and right child of some parent node */ void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left, int nidx_right, dh::AllReducer* reducer) { auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; // Decide whether to build the left histogram or right histogram // Use sum of Hessian as a heuristic to select node with fewest training instances bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess(); if (fewer_right) { std::swap(build_hist_nidx, subtraction_trick_nidx); } this->BuildHist(build_hist_nidx); this->AllReduceHist(build_hist_nidx, reducer); // Check whether we can use the subtraction trick to calculate the other bool do_subtraction_trick = this->CanDoSubtractionTrick( candidate.nid, build_hist_nidx, subtraction_trick_nidx); if (do_subtraction_trick) { // Calculate other histogram using subtraction trick this->SubtractionTrick(candidate.nid, build_hist_nidx, subtraction_trick_nidx); } else { // Calculate other histogram manually this->BuildHist(subtraction_trick_nidx); this->AllReduceHist(subtraction_trick_nidx, reducer); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { RegTree& tree = *p_tree; auto evaluator = tree_evaluator.GetEvaluator(); auto parent_sum = candidate.split.left_sum + candidate.split.right_sum; auto base_weight = candidate.base_weight; auto left_weight = candidate.left_weight * param.learning_rate; auto right_weight = 
candidate.right_weight * param.learning_rate; auto is_cat = candidate.split.is_cat; if (is_cat) { auto cat = common::AsCat(candidate.split.fvalue); std::vector<uint32_t> split_cats(LBitField32::ComputeStorageSize(std::max(cat+1, 1)), 0); LBitField32 cats_bits(split_cats); cats_bits.Set(cat); dh::CopyToD(split_cats, &node_categories); tree.ExpandCategorical( candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(), candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess()); } else { tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue, candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(), candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess()); } // Set up child constraints auto left_child = tree[candidate.nid].LeftChild(); auto right_child = tree[candidate.nid].RightChild(); tree_evaluator.AddSplit(candidate.nid, left_child, right_child, tree[candidate.nid].SplitIndex(), candidate.left_weight, candidate.right_weight); node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum; node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum; interaction_constraints.Split( candidate.nid, tree[candidate.nid].SplitIndex(), tree[candidate.nid].LeftChild(), tree[candidate.nid].RightChild()); } ExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) { constexpr bst_node_t kRootNIdx = 0; dh::XGBCachingDeviceAllocator<char> alloc; GradientPair root_sum = dh::Reduce( thrust::cuda::par(alloc), thrust::device_ptr<GradientPair const>(gpair.data()), thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()), GradientPair{}, thrust::plus<GradientPair>{}); rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum), 2); this->BuildHist(kRootNIdx); this->AllReduceHist(kRootNIdx, reducer); // Remember root stats node_sum_gradients[kRootNIdx] = root_sum; p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess(); auto weight = CalcWeight(param, root_sum); p_tree->Stat(kRootNIdx).base_weight = weight; (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight); // Generate first split auto split = this->EvaluateRootSplit(root_sum); dh::TemporaryArray<ExpandEntry> entries(1); auto d_entries = entries.data().get(); auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>(); GPUTrainingParam gpu_param(param); auto depth = p_tree->GetDepth(kRootNIdx); dh::LaunchN(device_id, 1, [=] __device__(size_t idx) { float left_weight = evaluator.CalcWeight(kRootNIdx, gpu_param, GradStats{split.left_sum}); float right_weight = evaluator.CalcWeight( kRootNIdx, gpu_param, GradStats{split.right_sum}); d_entries[0] = ExpandEntry(kRootNIdx, depth, split, weight, left_weight, right_weight); }); ExpandEntry root_entry; dh::safe_cuda(cudaMemcpyAsync( &root_entry, entries.data().get(), sizeof(ExpandEntry) * entries.size(), cudaMemcpyDeviceToHost)); return root_entry; } void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, RegTree* p_tree, dh::AllReducer* reducer) { auto& tree = *p_tree; Driver driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy)); monitor.Start("Reset"); this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_); monitor.Stop("Reset"); monitor.Start("InitRoot"); driver.Push({ this->InitRoot(p_tree, reducer) }); monitor.Stop("InitRoot"); auto num_leaves = 1; // The set of leaves that 
can be expanded asynchronously auto expand_set = driver.Pop(); while (!expand_set.empty()) { auto new_candidates = pinned.GetSpan<ExpandEntry>(expand_set.size() * 2, ExpandEntry()); for (auto i = 0ull; i < expand_set.size(); i++) { auto candidate = expand_set.at(i); if (!candidate.IsValid(param, num_leaves)) { continue; } this->ApplySplit(candidate, p_tree); num_leaves++; int left_child_nidx = tree[candidate.nid].LeftChild(); int right_child_nidx = tree[candidate.nid].RightChild(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.Start("UpdatePosition"); this->UpdatePosition(candidate.nid, p_tree); monitor.Stop("UpdatePosition"); monitor.Start("BuildHist"); this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer); monitor.Stop("BuildHist"); monitor.Start("EvaluateSplits"); this->EvaluateLeftRightSplits(candidate, left_child_nidx, right_child_nidx, *p_tree, new_candidates.subspan(i * 2, 2)); monitor.Stop("EvaluateSplits"); } else { // Set default new_candidates[i * 2] = ExpandEntry(); new_candidates[i * 2 + 1] = ExpandEntry(); } } dh::safe_cuda(cudaDeviceSynchronize()); driver.Push(new_candidates.begin(), new_candidates.end()); expand_set = driver.Pop(); } monitor.Start("FinalisePosition"); this->FinalisePosition(p_tree, p_fmat); monitor.Stop("FinalisePosition"); } }; template <typename GradientSumT> class GPUHistMakerSpecialised { public: GPUHistMakerSpecialised() = default; void Configure(const Args& args, GenericParameter const* generic_param) { param_.UpdateAllowUnknown(args); generic_param_ = generic_param; hist_maker_param_.UpdateAllowUnknown(args); dh::CheckComputeCapability(); monitor_.Init("updater_gpu_hist"); } ~GPUHistMakerSpecialised() { // NOLINT dh::GlobalMemoryLogger().Log(); } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) { monitor_.Start("Update"); // rescale learning rate according to size of trees float lr = param_.learning_rate; param_.learning_rate = lr / trees.size(); // build tree try { for (xgboost::RegTree* tree : trees) { this->UpdateTree(gpair, dmat, tree); if (hist_maker_param_.debug_synchronize) { this->CheckTreesSynchronized(tree); } } dh::safe_cuda(cudaGetLastError()); } catch (const std::exception& e) { LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl; } param_.learning_rate = lr; monitor_.Stop("Update"); } void InitDataOnce(DMatrix* dmat) { device_ = generic_param_->gpu_id; CHECK_GE(device_, 0) << "Must have at least one device"; info_ = &dmat->Info(); reducer_.Init({device_}); // NOLINT // Synchronise the column sampling seed uint32_t column_sampling_seed = common::GlobalRandom()(); rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0); BatchParam batch_param{ device_, param_.max_bin, generic_param_->gpu_page_size }; auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl(); dh::safe_cuda(cudaSetDevice(device_)); info_->feature_types.SetDevice(device_); maker.reset(new GPUHistMakerDevice<GradientSumT>(device_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_, column_sampling_seed, info_->num_col_, hist_maker_param_.deterministic_histogram, batch_param)); p_last_fmat_ = dmat; initialised_ = true; } void InitData(DMatrix* dmat) { if (!initialised_) { monitor_.Start("InitDataOnce"); this->InitDataOnce(dmat); monitor_.Stop("InitDataOnce"); } } // Only call this method for testing void CheckTreesSynchronized(RegTree* local_tree) const 
{ std::string s_model; common::MemoryBufferStream fs(&s_model); int rank = rabit::GetRank(); if (rank == 0) { local_tree->Save(&fs); } fs.Seek(0); rabit::Broadcast(&s_model, 0); RegTree reference_tree {}; // rank 0 tree reference_tree.Load(&fs); CHECK(*local_tree == reference_tree); } void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree) { monitor_.Start("InitData"); this->InitData(p_fmat); monitor_.Stop("InitData"); gpair->SetDevice(device_); maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_); } bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) { if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) { return false; } monitor_.Start("UpdatePredictionCache"); p_out_preds->SetDevice(device_); maker->UpdatePredictionCache(p_out_preds->DeviceSpan()); monitor_.Stop("UpdatePredictionCache"); return true; } TrainParam param_; // NOLINT MetaInfo* info_{}; // NOLINT std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT private: bool initialised_ { false }; GPUHistMakerTrainParam hist_maker_param_; GenericParameter const* generic_param_; dh::AllReducer reducer_; DMatrix* p_last_fmat_ { nullptr }; int device_{-1}; common::Monitor monitor_; }; class GPUHistMaker : public TreeUpdater { public: void Configure(const Args& args) override { // Used in test to count how many configurations are performed LOG(DEBUG) << "[GPU Hist]: Configure"; hist_maker_param_.UpdateAllowUnknown(args); // The passed in args can be empty, if we simply purge the old maker without // preserving parameters then we can't do Update on it. TrainParam param; if (float_maker_) { param = float_maker_->param_; } else if (double_maker_) { param = double_maker_->param_; } if (hist_maker_param_.single_precision_histogram) { float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>()); float_maker_->param_ = param; float_maker_->Configure(args, tparam_); } else { double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>()); double_maker_->param_ = param; double_maker_->Configure(args, tparam_); } } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_); if (hist_maker_param_.single_precision_histogram) { float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>()); FromJson(config.at("train_param"), &float_maker_->param_); } else { double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>()); FromJson(config.at("train_param"), &double_maker_->param_); } } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["gpu_hist_train_param"] = ToJson(hist_maker_param_); if (hist_maker_param_.single_precision_histogram) { out["train_param"] = ToJson(float_maker_->param_); } else { out["train_param"] = ToJson(double_maker_->param_); } } void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { if (hist_maker_param_.single_precision_histogram) { float_maker_->Update(gpair, dmat, trees); } else { double_maker_->Update(gpair, dmat, trees); } } bool UpdatePredictionCache( const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override { if (hist_maker_param_.single_precision_histogram) { return float_maker_->UpdatePredictionCache(data, p_out_preds); } else { return double_maker_->UpdatePredictionCache(data, p_out_preds); } } char const* Name() const override { return "grow_gpu_hist"; } private: GPUHistMakerTrainParam hist_maker_param_; 
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_; std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_; }; #if !defined(GTEST_TEST) XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); #endif // !defined(GTEST_TEST) } // namespace tree } // namespace xgboost
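// ApplySplit above records which categories belong to a categorical split by
// setting bits in an LBitField32 that is copied to the device, and
// UpdatePosition / FinalisePositionInPage later query that bit set through
// common::Decision.  The snippet below is only a host-side sketch of the
// underlying "pack category ids into 32-bit words and test membership" idea;
// EncodeCategories and Contains are hypothetical helpers, not the xgboost API,
// and whether a matching category is routed left or right is decided by the
// tree logic above, not by this sketch.
#include <cassert>
#include <cstdint>
#include <vector>

// Pack a list of category ids into 32-bit words, one bit per category.
std::vector<uint32_t> EncodeCategories(const std::vector<int>& cats, int n_categories) {
  std::vector<uint32_t> bits((n_categories + 31) / 32, 0u);
  for (int c : cats) {
    bits[c / 32] |= (1u << (c % 32));
  }
  return bits;
}

// Membership test used when routing a row at a categorical split.
bool Contains(const std::vector<uint32_t>& bits, int cat) {
  return (bits[cat / 32] >> (cat % 32)) & 1u;
}

int main() {
  auto bits = EncodeCategories({2, 5, 31, 40}, 64);  // needs two 32-bit words
  assert(Contains(bits, 31));
  assert(!Contains(bits, 6));
  return 0;
}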
3574139c76c0bbffc03bdf7764c50e2fe8a9da65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
  const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                                        blockIdx.y * blockDim.y + threadIdx.y);

  if(thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  // NOTE: this implementation hard-codes a 3x3 filter (filterWidth == 3) and
  // skips neighbours that fall outside the image instead of clamping them.
  float pixelVal = (float) (filter[4] * inputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x]);

  if(thread_2D_pos.x - 1 >= 0 && thread_2D_pos.y - 1 >= 0)
    pixelVal = pixelVal + (float) (filter[0] * inputChannel[(thread_2D_pos.y - 1) * numCols + (thread_2D_pos.x - 1)]);

  if(thread_2D_pos.y - 1 >= 0)
    pixelVal = pixelVal + (float) (filter[1] * inputChannel[(thread_2D_pos.y - 1) * numCols + thread_2D_pos.x]);

  if(thread_2D_pos.x + 1 < numCols && thread_2D_pos.y - 1 >= 0)
    pixelVal = pixelVal + (float) (filter[2] * inputChannel[(thread_2D_pos.y - 1) * numCols + (thread_2D_pos.x + 1)]);

  if(thread_2D_pos.x - 1 >= 0)
    pixelVal = pixelVal + (float) (filter[3] * inputChannel[thread_2D_pos.y * numCols + (thread_2D_pos.x - 1)]);

  if(thread_2D_pos.x + 1 < numCols)
    pixelVal = pixelVal + (float) (filter[5] * inputChannel[thread_2D_pos.y * numCols + (thread_2D_pos.x + 1)]);

  if(thread_2D_pos.x - 1 >= 0 && thread_2D_pos.y + 1 < numRows)
    pixelVal = pixelVal + (float) (filter[6] * inputChannel[(thread_2D_pos.y + 1) * numCols + (thread_2D_pos.x - 1)]);

  if(thread_2D_pos.y + 1 < numRows)
    pixelVal = pixelVal + (float) (filter[7] * inputChannel[(thread_2D_pos.y + 1) * numCols + thread_2D_pos.x]);

  // bottom-right neighbour: indexed by (row + 1, column + 1)
  if(thread_2D_pos.x + 1 < numCols && thread_2D_pos.y + 1 < numRows)
    pixelVal = pixelVal + (float) (filter[8] * inputChannel[(thread_2D_pos.y + 1) * numCols + (thread_2D_pos.x + 1)]);

  outputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x] = (unsigned char) pixelVal;
}

//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  // TODO
  //
  // NOTE: Be careful not to try to access memory that is outside the bounds of
  // the image. You'll want code that performs the following check before accessing
  // GPU memory:
  //
  // if ( absolute_image_position_x >= numCols ||
  //      absolute_image_position_y >= numRows )
  // {
  //     return;
  // }

  const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                                        blockIdx.y * blockDim.y + threadIdx.y);

  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;

  if(thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  uchar4 inputPixel = inputImageRGBA[thread_1D_pos];

  unsigned char red   = inputPixel.x;
  unsigned char green = inputPixel.y;
  unsigned char blue  = inputPixel.z;

  redChannel[thread_1D_pos]   = red;
  greenChannel[thread_1D_pos] = green;
  blueChannel[thread_1D_pos]  = blue;
}

//This kernel takes in three color channels and recombines them
//into one image.  The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                                        blockIdx.y * blockDim.y + threadIdx.y);

  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;

  //make sure we don't try and access memory outside the image
  //by having any threads mapped there return early
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  unsigned char red   = redChannel[thread_1D_pos];
  unsigned char green = greenChannel[thread_1D_pos];
  unsigned char blue  = blueChannel[thread_1D_pos];

  //Alpha should be 255 for no transparency
  uchar4 outputPixel = make_uchar4(red, green, blue, 255);

  outputImageRGBA[thread_1D_pos] = outputPixel;
}

unsigned char *d_red, *d_green, *d_blue;
float         *d_filter;

void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{

  //allocate memory for the three different channels
  //original
  checkCudaErrors(hipMalloc(&d_red,   sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(hipMalloc(&d_blue,  sizeof(unsigned char) * numRowsImage * numColsImage));

  //TODO:
  //Allocate memory for the filter on the GPU
  //Use the pointer d_filter that we have already declared for you
  //You need to allocate memory for the filter with hipMalloc
  //be sure to use checkCudaErrors like the above examples to
  //be able to tell if anything goes wrong
  //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
  //The filter holds floats, so the allocation is sized with sizeof(float).
  checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));

  //TODO:
  //Copy the filter on the host (h_filter) to the memory you just allocated
  //on the GPU.  hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
  //Remember to use checkCudaErrors!
  checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}

void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  //TODO: Set reasonable block size (i.e., number of threads per block)
  int blockWidth = 32;
  const dim3 blockSize(blockWidth, blockWidth, 1);

  //TODO:
  //Compute correct grid size (i.e., number of blocks per kernel launch)
  //from the image size and block size.  The x dimension of the grid covers
  //the columns and the y dimension covers the rows, matching the thread
  //indexing used inside the kernels.
  int blocksX = numCols/blockWidth + 1;
  int blocksY = numRows/blockWidth + 1;
  const dim3 gridSize(blocksX, blocksY, 1);

  //TODO: Launch a kernel for separating the RGBA image into different color channels
  hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);

  // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
  // launching your kernel to make sure that you didn't make any mistakes.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  //TODO: Call your convolution kernel here 3 times, once for each color channel.
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red,   d_redBlurred,   numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,  d_blueBlurred,  numRows, numCols, d_filter, filterWidth);

  // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
  // launching your kernel to make sure that you didn't make any mistakes.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // Now we recombine your results. We take care of launching this kernel for you.
  //
  // NOTE: This kernel launch depends on the gridSize and blockSize variables,
  // which you must set yourself.
  hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}

//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
  checkCudaErrors(hipFree(d_red));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_blue));
  checkCudaErrors(hipFree(d_filter));
}
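// A quick way to sanity-check the gaussian_blur kernel above is to compare one
// channel against a simple CPU reference.  The routine below is only a sketch
// for that purpose (it is not the course's reference solution); it clamps
// out-of-range neighbours to the image border, as described in the kernel's
// NOTE, so for the 3x3 cross-shaped filter with 0.2 weights in the comments
// above it reproduces the worked 3.2 example for interior pixels.
#include <algorithm>
#include <vector>

void ReferenceBlurChannel(const std::vector<unsigned char>& in,
                          std::vector<unsigned char>& out,
                          int numRows, int numCols,
                          const std::vector<float>& filter, int filterWidth) {
  const int r = filterWidth / 2;
  for (int y = 0; y < numRows; ++y) {
    for (int x = 0; x < numCols; ++x) {
      float acc = 0.f;
      for (int fy = -r; fy <= r; ++fy) {
        for (int fx = -r; fx <= r; ++fx) {
          int cy = std::min(std::max(y + fy, 0), numRows - 1);  // clamp row index
          int cx = std::min(std::max(x + fx, 0), numCols - 1);  // clamp column index
          acc += filter[(fy + r) * filterWidth + (fx + r)] * in[cy * numCols + cx];
        }
      }
      out[y * numCols + x] = static_cast<unsigned char>(acc);
    }
  }
}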
3574139c76c0bbffc03bdf7764c50e2fe8a9da65.cu
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
//  1) RRRRRRRR...
//  2) GGGGGGGG...
//  3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
//  0.0  0.2  0.0
//  0.2  0.2  0.2
//  0.0  0.2  0.0
//
// Image (note that we align the array of weights to the center of the box):
//
//    1  2  5  2  0  3
//       -------
//    3 |2  5  1| 6  0       0.0*2 + 0.2*5 + 0.0*1 +
//      |       |
//    4 |3  6  2| 1  4   ->  0.2*3 + 0.2*6 + 0.2*2 +   ->  3.2
//      |       |
//    0 |4  0  3| 4  2       0.0*4 + 0.2*0 + 0.0*3
//       -------
//    9  6  5  0  3  9
//
//          (1)                        (2)                  (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************

#include "utils.h"

__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // TODO

  // NOTE: Be sure to compute any intermediate results in floating point
  // before storing the final result as unsigned char.

  // NOTE: Be careful not to try to access memory that is outside the bounds of
  // the image. You'll want code that performs the following check before accessing
  // GPU memory:
  //
  // if ( absolute_image_position_x >= numCols ||
  //      absolute_image_position_y >= numRows )
  // {
  //     return;
  // }

  // NOTE: If a thread's absolute 2D position is within the image, but some of
  // its neighbors are outside the image, then you will need to be extra careful. Instead
  // of trying to read such a neighbor value from GPU memory (which won't work because
  // the value is out of bounds), you should explicitly clamp the neighbor values you read
  // to be within the bounds of the image. If this is not clear to you, then please refer
  // to the sequential reference solution for the exact clamping semantics you should follow.
  const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                                       blockIdx.y * blockDim.y + threadIdx.y);

  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  // NOTE: this implementation hard-codes a 3x3 filter (indices 0..8) and skips
  // out-of-range neighbors instead of clamping them, so it only matches the
  // reference solution when filterWidth == 3.
  float pixelVal = (float)(filter[4] * inputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x]);

  if (thread_2D_pos.x - 1 >= 0 && thread_2D_pos.y - 1 >= 0)
    pixelVal = pixelVal + (float)(filter[0] * inputChannel[(thread_2D_pos.y - 1) * numCols + (thread_2D_pos.x - 1)]);
  if (thread_2D_pos.y - 1 >= 0)
    pixelVal = pixelVal + (float)(filter[1] * inputChannel[(thread_2D_pos.y - 1) * numCols + thread_2D_pos.x]);
  if (thread_2D_pos.x + 1 < numCols && thread_2D_pos.y - 1 >= 0)
    pixelVal = pixelVal + (float)(filter[2] * inputChannel[(thread_2D_pos.y - 1) * numCols + (thread_2D_pos.x + 1)]);
  if (thread_2D_pos.x - 1 >= 0)
    pixelVal = pixelVal + (float)(filter[3] * inputChannel[thread_2D_pos.y * numCols + (thread_2D_pos.x - 1)]);
  if (thread_2D_pos.x + 1 < numCols)
    pixelVal = pixelVal + (float)(filter[5] * inputChannel[thread_2D_pos.y * numCols + (thread_2D_pos.x + 1)]);
  if (thread_2D_pos.x - 1 >= 0 && thread_2D_pos.y + 1 < numRows)
    pixelVal = pixelVal + (float)(filter[6] * inputChannel[(thread_2D_pos.y + 1) * numCols + (thread_2D_pos.x - 1)]);
  if (thread_2D_pos.y + 1 < numRows)
    pixelVal = pixelVal + (float)(filter[7] * inputChannel[(thread_2D_pos.y + 1) * numCols + thread_2D_pos.x]);
  if (thread_2D_pos.x + 1 < numCols && thread_2D_pos.y + 1 < numRows)
    pixelVal = pixelVal + (float)(filter[8] * inputChannel[(thread_2D_pos.y + 1) * numCols + (thread_2D_pos.x + 1)]);

  outputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x] = (unsigned char)pixelVal;
}

//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  // TODO
  //
  // NOTE: Be careful not to try to access memory that is outside the bounds of
  // the image. You'll want code that performs the following check before accessing
  // GPU memory:
  //
  // if ( absolute_image_position_x >= numCols ||
  //      absolute_image_position_y >= numRows )
  // {
  //     return;
  // }
  const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                                       blockIdx.y * blockDim.y + threadIdx.y);
  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;

  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  uchar4 inputPixel = inputImageRGBA[thread_1D_pos];
  unsigned char red   = inputPixel.x;
  unsigned char green = inputPixel.y;
  unsigned char blue  = inputPixel.z;

  redChannel[thread_1D_pos]   = red;
  greenChannel[thread_1D_pos] = green;
  blueChannel[thread_1D_pos]  = blue;
}

//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                                       blockIdx.y * blockDim.y + threadIdx.y);
  const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;

  //make sure we don't try and access memory outside the image
  //by having any threads mapped there return early
  if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
    return;

  unsigned char red   = redChannel[thread_1D_pos];
  unsigned char green = greenChannel[thread_1D_pos];
  unsigned char blue  = blueChannel[thread_1D_pos];

  //Alpha should be 255 for no transparency
  uchar4 outputPixel = make_uchar4(red, green, blue, 255);

  outputImageRGBA[thread_1D_pos] = outputPixel;
}

unsigned char *d_red, *d_green, *d_blue;
float         *d_filter;

void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  //allocate memory for the three different channels
  //original
  checkCudaErrors(cudaMalloc(&d_red,   sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_blue,  sizeof(unsigned char) * numRowsImage * numColsImage));

  //TODO:
  //Allocate memory for the filter on the GPU
  //Use the pointer d_filter that we have already declared for you
  //You need to allocate memory for the filter with cudaMalloc
  //be sure to use checkCudaErrors like the above examples to
  //be able to tell if anything goes wrong
  //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
  //The filter weights are floats, so the allocation must be sized with sizeof(float).
  checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));

  //TODO:
  //Copy the filter on the host (h_filter) to the memory you just allocated
  //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
  //Remember to use checkCudaErrors!
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}

void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  //TODO: Set reasonable block size (i.e., number of threads per block)
  int blockWidth = 32;
  const dim3 blockSize(blockWidth, blockWidth, 1);

  //TODO:
  //Compute correct grid size (i.e., number of blocks per kernel launch)
  //from the image size and block size.
  //blockIdx.x/threadIdx.x index columns in the kernels above, so the grid's x
  //dimension must come from numCols and its y dimension from numRows.
  int blocksX = numCols/blockWidth + 1;
  int blocksY = numRows/blockWidth + 1;
  const dim3 gridSize(blocksX, blocksY, 1);

  //TODO: Launch a kernel for separating the RGBA image into different color channels
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);

  // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
  // launching your kernel to make sure that you didn't make any mistakes.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  //TODO: Call your convolution kernel here 3 times, once for each color channel.
  gaussian_blur<<<gridSize, blockSize>>>(d_red,   d_redBlurred,   numRows, numCols, d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_blue,  d_blueBlurred,  numRows, numCols, d_filter, filterWidth);

  // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
  // launching your kernel to make sure that you didn't make any mistakes.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Now we recombine your results. We take care of launching this kernel for you.
  //
  // NOTE: This kernel launch depends on the gridSize and blockSize variables,
  // which you must set yourself.
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}

//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  //d_filter was allocated in allocateMemoryAndCopyToGPU, so release it here as well.
  checkCudaErrors(cudaFree(d_filter));
}
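// The kernel above is limited to a 3x3 neighborhood, while the assignment passes an
// arbitrary (odd) filterWidth. The following is a minimal sketch of the general clamped
// version that the header comments describe; it is illustrative only, not the reference
// solution, and the kernel name is made up for this example.
__global__
void gaussian_blur_general(const unsigned char* const inputChannel,
                           unsigned char* const outputChannel,
                           int numRows, int numCols,
                           const float* const filter,
                           const int filterWidth)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;

  const int halfWidth = filterWidth / 2;
  float result = 0.0f;

  for (int fr = -halfWidth; fr <= halfWidth; ++fr) {
    for (int fc = -halfWidth; fc <= halfWidth; ++fc) {
      //clamp neighbor coordinates to the image instead of reading out of bounds
      const int r = min(max(row + fr, 0), numRows - 1);
      const int c = min(max(col + fc, 0), numCols - 1);
      const float image_value  = (float)inputChannel[r * numCols + c];
      const float filter_value = filter[(fr + halfWidth) * filterWidth + (fc + halfWidth)];
      result += image_value * filter_value;
    }
  }

  outputChannel[row * numCols + col] = (unsigned char)result;
}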
d67fc925fb33481988bbfbe411af82a9995fd04f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is machine problem 1, part 1, shift problem * * The problem is to take in a string (a vector of characters) and a shift amount, * and add that number to each element of * the string, effectively "shifting" each element in the * string. * * We do this in three different ways: * 1. With a cuda kernel loading chars and outputting chars for each thread * 2. With a cuda kernel, casting the character pointer to an int so that * we load and store 4 bytes each time instead of 1 which gives us better coalescing * and uses the memory effectively to achieve higher bandwidth * 3. Same spiel except with a uint2, so that we load 8 bytes each time * */ #include <stdlib.h> #include <iostream> #include <iomanip> #include <ctime> #include <fstream> #include <vector> #include "mp1-util.h" #include "shift.hip" const char kMobyDick[] = "common/mobydick.txt"; void host_shift(std::vector<unsigned char>& input_array , std::vector<unsigned char>& output_array , unsigned char shift_amount) { for(unsigned int i=0; i<input_array.size(); i++) { unsigned char element = input_array[i]; output_array[i] = element + shift_amount; } } void checkResults(std::vector<unsigned char>& text_host , unsigned char* device_output_array , int num_entries , const char* type) { //allocate space on host for gpu results std::vector<unsigned char> text_from_gpu(num_entries); // download and inspect the result on the host: hipMemcpy(&text_from_gpu[0], device_output_array, num_entries, hipMemcpyDeviceToHost); check_launch("copy from gpu"); // check CUDA output versus reference output int error = 0; for(int i = 0; i < num_entries; i++) { if(text_host[i] != text_from_gpu[i]) { ++error; std::cerr << "Mismatch at pos: " << i << "\nExpected " << (int)text_host[i] << " and got " << (int)text_from_gpu[i] << std::endl; if(error > 10) { std::cerr << "\nToo many errors, quitting" << std::endl; break; } } } if(error) { std::cerr << "\nError(s) in " << type << " kernel!" << std::endl; exit(1); } } int main(int argc, char** argv) { //check that the correct number of command line arguments were given if(argc != 2) { std::cerr << "Must supply the number of times to double the input file!" << std::endl; return 1; } int number_of_doubles = atoi(argv[1]); //convert argument to integer hipFree(0); //initialize cuda context to avoid including cost in timings later //warm-up each of the kernels to avoid including overhead in timing //if the kernels are written correctly then they should //never make a bad memory access, even though we are passing in NULL //pointers since we are also passing in a size of 0 hipLaunchKernelGGL(( shift_char) , dim3(1), dim3(1), 0, 0, NULL, NULL, 0, 0); hipLaunchKernelGGL(( shift_int) , dim3(1), dim3(1), 0, 0, NULL, NULL, 0, 0); hipLaunchKernelGGL(( shift_int2) , dim3(1), dim3(1), 0, 0, NULL, NULL, 0, 0); //First load the text std::string input_file(kMobyDick); std::ifstream ifs(input_file.c_str(), std::ios::binary); if(!ifs.good()) { std::cerr << "Couldn't open " << input_file << "!" 
<< std::endl; return 1; } std::vector<unsigned char> text; ifs.seekg(0, std::ios::end); //seek to end of file int length = ifs.tellg(); //get distance from beginning ifs.seekg(0, std::ios::beg); //move back to beginning text.resize(length); ifs.read((char*)&text[0], length); ifs.close(); //need to make a couple copies of the book, otherwise everything happens too quickly //make 2^4 = 16 copies std::vector<uint> sizes_to_test; sizes_to_test.push_back(text.size()); for(int i = 0; i < number_of_doubles; ++i) { text.insert(text.end(), text.begin(), text.end()); sizes_to_test.push_back(text.size()); } // allocate host arrays std::vector<unsigned char> text_gpu(text.size()); std::vector<unsigned char> text_host(text.size()); // Compute the size of the arrays in bytes for memory allocation. // We need enough padding so that the uint2 access won't be out of bounds. const int num_bytes_alloc = (text.size() + 7) * sizeof(unsigned char); // pointers to device arrays unsigned char* device_input_array = 0; unsigned char* device_output_array = 0; // hipMalloc device arrays hipMalloc((void**)&device_input_array, num_bytes_alloc); hipMalloc((void**)&device_output_array, num_bytes_alloc); // set the padding to 0 to avoid overflow. hipMemset(device_input_array + text.size(), 0, num_bytes_alloc - text.size()); // if either memory allocation failed, report an error message if(device_input_array == 0 || device_output_array == 0) { std::cerr << "Couldn't allocate memory!" << std::endl; return 1; } // generate random shift unsigned char shift_amount = (rand() % 25) + 1; //we don't want the shift to be 0! /* Size of text in bytes. This is the largest size that was allocated. */ const int num_bytes = text.size() * sizeof(unsigned char); // copy input to GPU { event_pair timer; start_timer(&timer); hipMemcpy(device_input_array, &text[0], text.size(), hipMemcpyHostToDevice); check_launch("copy to gpu"); double elapsed_time_h2d = stop_timer(&timer); std::cout << "Host -> Device transfer bandwidth " << num_bytes / (elapsed_time_h2d / 1000.) / 1E9 << std::endl << std::endl; } // generate reference output { event_pair timer; start_timer(&timer); host_shift(text, text_host, shift_amount); double elapsed_time_host = stop_timer(&timer); std::cout << "Host (reference) solution bandwidth GB/sec: " << 2 * num_bytes / (elapsed_time_host / 1000.) 
/ 1E9 << std::endl << std::endl; } // CUDA block size const int block_size = 256; std::cout << std::setw(45) << "Device Bandwidth GB/sec" << std::endl; std::cout << std::setw(70) << std::setfill('-') << " " << std::endl << std::setfill(' '); std::cout << std::setw(15) << " " << std::setw(15) << "char" << std::setw( 15) << "uint" << std::setw(15) << "uint2" << std::endl; std::cout << std::setw(15) << "Problem Size MB" << std::endl; // Loop through all the problem sizes and generate timing / bandwidth information for each // and also check correctness for(uint i = 0; i < sizes_to_test.size(); ++i) { // generate GPU char output double elapsed_time_char = doGPUShiftChar(device_input_array, device_output_array, shift_amount, sizes_to_test[i], block_size); checkResults(text_host, device_output_array, sizes_to_test[i], "char"); // make sure we don't falsely say the next kernel is correct because we've left the correct answer sitting in memory hipMemset(device_output_array, 0, sizes_to_test[i]); // generate GPU uint output double elapsed_time_uint = doGPUShiftUInt(device_input_array, device_output_array, shift_amount, sizes_to_test[i], block_size); checkResults(text_host, device_output_array, sizes_to_test[i], "uint"); // make sure we don't falsely say the next kernel is correct because we've left the correct answer sitting in memory hipMemset(device_output_array, 0, sizes_to_test[i]); // generate GPU uint2 output double elapsed_time_uint2 = doGPUShiftUInt2(device_input_array, device_output_array, shift_amount, sizes_to_test[i], block_size); checkResults(text_host, device_output_array, sizes_to_test[i], "uint2"); // make sure we don't falsely say the next kernel is correct because we've left the correct answer sitting in memory hipMemset(device_output_array, 0, sizes_to_test[i]); std::cout << std::setw(15) << sizes_to_test[i] / 1E6 << " " << std::setw(15) << 2 * sizes_to_test[i] / (elapsed_time_char / 1000.) / 1E9 << std::setw(15) << 2 * sizes_to_test[i] / (elapsed_time_uint / 1000.) / 1E9 << std::setw(15) << 2 * sizes_to_test[i] / (elapsed_time_uint2 / 1000.) / 1E9 << std::endl; } // deallocate memory hipFree(device_input_array); hipFree(device_output_array); }
d67fc925fb33481988bbfbe411af82a9995fd04f.cu
/* This is machine problem 1, part 1, shift problem * * The problem is to take in a string (a vector of characters) and a shift amount, * and add that number to each element of * the string, effectively "shifting" each element in the * string. * * We do this in three different ways: * 1. With a cuda kernel loading chars and outputting chars for each thread * 2. With a cuda kernel, casting the character pointer to an int so that * we load and store 4 bytes each time instead of 1 which gives us better coalescing * and uses the memory effectively to achieve higher bandwidth * 3. Same spiel except with a uint2, so that we load 8 bytes each time * */ #include <stdlib.h> #include <iostream> #include <iomanip> #include <ctime> #include <fstream> #include <vector> #include "mp1-util.h" #include "shift.cu" const char kMobyDick[] = "common/mobydick.txt"; void host_shift(std::vector<unsigned char>& input_array , std::vector<unsigned char>& output_array , unsigned char shift_amount) { for(unsigned int i=0; i<input_array.size(); i++) { unsigned char element = input_array[i]; output_array[i] = element + shift_amount; } } void checkResults(std::vector<unsigned char>& text_host , unsigned char* device_output_array , int num_entries , const char* type) { //allocate space on host for gpu results std::vector<unsigned char> text_from_gpu(num_entries); // download and inspect the result on the host: cudaMemcpy(&text_from_gpu[0], device_output_array, num_entries, cudaMemcpyDeviceToHost); check_launch("copy from gpu"); // check CUDA output versus reference output int error = 0; for(int i = 0; i < num_entries; i++) { if(text_host[i] != text_from_gpu[i]) { ++error; std::cerr << "Mismatch at pos: " << i << "\nExpected " << (int)text_host[i] << " and got " << (int)text_from_gpu[i] << std::endl; if(error > 10) { std::cerr << "\nToo many errors, quitting" << std::endl; break; } } } if(error) { std::cerr << "\nError(s) in " << type << " kernel!" << std::endl; exit(1); } } int main(int argc, char** argv) { //check that the correct number of command line arguments were given if(argc != 2) { std::cerr << "Must supply the number of times to double the input file!" << std::endl; return 1; } int number_of_doubles = atoi(argv[1]); //convert argument to integer cudaFree(0); //initialize cuda context to avoid including cost in timings later //warm-up each of the kernels to avoid including overhead in timing //if the kernels are written correctly then they should //never make a bad memory access, even though we are passing in NULL //pointers since we are also passing in a size of 0 shift_char <<<1, 1>>>(NULL, NULL, 0, 0); shift_int <<<1, 1>>>(NULL, NULL, 0, 0); shift_int2 <<<1, 1>>>(NULL, NULL, 0, 0); //First load the text std::string input_file(kMobyDick); std::ifstream ifs(input_file.c_str(), std::ios::binary); if(!ifs.good()) { std::cerr << "Couldn't open " << input_file << "!" 
<< std::endl; return 1; } std::vector<unsigned char> text; ifs.seekg(0, std::ios::end); //seek to end of file int length = ifs.tellg(); //get distance from beginning ifs.seekg(0, std::ios::beg); //move back to beginning text.resize(length); ifs.read((char*)&text[0], length); ifs.close(); //need to make a couple copies of the book, otherwise everything happens too quickly //make 2^4 = 16 copies std::vector<uint> sizes_to_test; sizes_to_test.push_back(text.size()); for(int i = 0; i < number_of_doubles; ++i) { text.insert(text.end(), text.begin(), text.end()); sizes_to_test.push_back(text.size()); } // allocate host arrays std::vector<unsigned char> text_gpu(text.size()); std::vector<unsigned char> text_host(text.size()); // Compute the size of the arrays in bytes for memory allocation. // We need enough padding so that the uint2 access won't be out of bounds. const int num_bytes_alloc = (text.size() + 7) * sizeof(unsigned char); // pointers to device arrays unsigned char* device_input_array = 0; unsigned char* device_output_array = 0; // cudaMalloc device arrays cudaMalloc((void**)&device_input_array, num_bytes_alloc); cudaMalloc((void**)&device_output_array, num_bytes_alloc); // set the padding to 0 to avoid overflow. cudaMemset(device_input_array + text.size(), 0, num_bytes_alloc - text.size()); // if either memory allocation failed, report an error message if(device_input_array == 0 || device_output_array == 0) { std::cerr << "Couldn't allocate memory!" << std::endl; return 1; } // generate random shift unsigned char shift_amount = (rand() % 25) + 1; //we don't want the shift to be 0! /* Size of text in bytes. This is the largest size that was allocated. */ const int num_bytes = text.size() * sizeof(unsigned char); // copy input to GPU { event_pair timer; start_timer(&timer); cudaMemcpy(device_input_array, &text[0], text.size(), cudaMemcpyHostToDevice); check_launch("copy to gpu"); double elapsed_time_h2d = stop_timer(&timer); std::cout << "Host -> Device transfer bandwidth " << num_bytes / (elapsed_time_h2d / 1000.) / 1E9 << std::endl << std::endl; } // generate reference output { event_pair timer; start_timer(&timer); host_shift(text, text_host, shift_amount); double elapsed_time_host = stop_timer(&timer); std::cout << "Host (reference) solution bandwidth GB/sec: " << 2 * num_bytes / (elapsed_time_host / 1000.) 
/ 1E9 << std::endl << std::endl; } // CUDA block size const int block_size = 256; std::cout << std::setw(45) << "Device Bandwidth GB/sec" << std::endl; std::cout << std::setw(70) << std::setfill('-') << " " << std::endl << std::setfill(' '); std::cout << std::setw(15) << " " << std::setw(15) << "char" << std::setw( 15) << "uint" << std::setw(15) << "uint2" << std::endl; std::cout << std::setw(15) << "Problem Size MB" << std::endl; // Loop through all the problem sizes and generate timing / bandwidth information for each // and also check correctness for(uint i = 0; i < sizes_to_test.size(); ++i) { // generate GPU char output double elapsed_time_char = doGPUShiftChar(device_input_array, device_output_array, shift_amount, sizes_to_test[i], block_size); checkResults(text_host, device_output_array, sizes_to_test[i], "char"); // make sure we don't falsely say the next kernel is correct because we've left the correct answer sitting in memory cudaMemset(device_output_array, 0, sizes_to_test[i]); // generate GPU uint output double elapsed_time_uint = doGPUShiftUInt(device_input_array, device_output_array, shift_amount, sizes_to_test[i], block_size); checkResults(text_host, device_output_array, sizes_to_test[i], "uint"); // make sure we don't falsely say the next kernel is correct because we've left the correct answer sitting in memory cudaMemset(device_output_array, 0, sizes_to_test[i]); // generate GPU uint2 output double elapsed_time_uint2 = doGPUShiftUInt2(device_input_array, device_output_array, shift_amount, sizes_to_test[i], block_size); checkResults(text_host, device_output_array, sizes_to_test[i], "uint2"); // make sure we don't falsely say the next kernel is correct because we've left the correct answer sitting in memory cudaMemset(device_output_array, 0, sizes_to_test[i]); std::cout << std::setw(15) << sizes_to_test[i] / 1E6 << " " << std::setw(15) << 2 * sizes_to_test[i] / (elapsed_time_char / 1000.) / 1E9 << std::setw(15) << 2 * sizes_to_test[i] / (elapsed_time_uint / 1000.) / 1E9 << std::setw(15) << 2 * sizes_to_test[i] / (elapsed_time_uint2 / 1000.) / 1E9 << std::endl; } // deallocate memory cudaFree(device_input_array); cudaFree(device_output_array); }
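// The kernels timed above (shift_char, shift_int, shift_int2) are defined in shift.cu,
// which is not part of this file. As a rough, hedged sketch of the idea the header
// comment describes -- loading 4 bytes per thread through an unsigned int pointer so
// accesses coalesce better than byte-wide loads -- the char and word-wide variants might
// look something like this. Names and signatures are assumptions; the real kernels may differ.
__global__ void shift_char_sketch(const unsigned char *input, unsigned char *output,
                                  unsigned char shift_amount, unsigned int num_elements)
{
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_elements)
    output[i] = input[i] + shift_amount;   // one byte loaded and stored per thread
}

__global__ void shift_uint_sketch(const unsigned int *input, unsigned int *output,
                                  unsigned char shift_amount, unsigned int num_words)
{
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_words) {
    unsigned int w = input[i];             // one 32-bit load covers 4 characters
    unsigned int r = 0;
    for (int b = 0; b < 4; ++b) {
      unsigned char c = (w >> (8 * b)) & 0xFF;
      c = c + shift_amount;                // wraps modulo 256, matching host_shift
      r |= (unsigned int)c << (8 * b);
    }
    output[i] = r;                         // one 32-bit store covers 4 characters
  }
}
// The uint2 variant extends the same idea to 8 bytes per thread; the padding the host
// code adds above (text.size() + 7 bytes, zero-filled) is what keeps those wide accesses
// in bounds when the text length is not a multiple of 8.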
28ee08e0550cbc0da6eb054fcf4ba180eab2dfb0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stencil.cuh" #include <gp/cuda-util-kernel.h> // Constants holding array sizes and pointers and coefficients. // // Values are set by cuda calls, they don't automatically take values // of variables in the C program with the same name. // __constant__ float v0, v1; __constant__ int array_size; __constant__ float* a; __constant__ float* b; extern __shared__ float s[]; // Shared memory for buffering a elements. __global__ void stencil(); __global__ void stencil_iter(); __global__ void stencil_shared(); __host__ hipError_t kernels_get_attr(struct hipFuncAttributes *attr, char **names) { // Return attributes of kernels, such as number of registers // used. hipError_t er; CU_SYM(a); CU_SYM(b); CU_SYM(array_size); CU_SYM(v0); CU_SYM(v1); #define GETATTR(func) \ er = hipFuncGetAttributes(attr,func); *names = #func; attr++; names++; \ if ( er ) return er; GETATTR(stencil); GETATTR(stencil_iter); GETATTR(stencil_shared); *names = NULL; return er; #undef GETATTR } __host__ void stencil_launch(dim3 dg, dim3 db, bool shared_kernel) { // Launch the kernel, using the provided configuration (block size, etc). // // stencil<<<dg,db>>>(); int shared_amt = (db.x + 2) * sizeof(float); if ( shared_kernel ) hipLaunchKernelGGL(( stencil_shared), dim3(dg),dim3(db),shared_amt, 0, ); else hipLaunchKernelGGL(( stencil_iter), dim3(dg),dim3(db), 0, 0, ); } __global__ void stencil() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= array_size ) return; b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] ); } __global__ void stencil_iter() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_count = blockDim.x * gridDim.x; for ( int idx = tid; idx < array_size; idx += thread_count ) { b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] ); } } __global__ void stencil_shared() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_count = blockDim.x * gridDim.x; int bl_idx = blockDim.x - 1; int sidx = threadIdx.x + 1; __shared__ float s[1024]; for ( int idx = tid; idx < array_size; idx += thread_count ) { // Load shared memory with a values. // float a_0 = a[idx]; s[sidx] = a_0; // Two lucky threads per block get to preload a second element. // if ( threadIdx.x == 0 ) s[0] = a[idx-1]; if ( threadIdx.x == bl_idx ) s[blockDim.x+1] = a[idx+1]; // Wait for everyone to finish. // // __syncthreads(); // Note that values read from a register or shared memory. // b[idx] = v0 * a_0 + v1 * ( s[sidx-1] + s[sidx+1] ); } }
28ee08e0550cbc0da6eb054fcf4ba180eab2dfb0.cu
#include "stencil.cuh" #include <gp/cuda-util-kernel.h> // Constants holding array sizes and pointers and coefficients. // // Values are set by cuda calls, they don't automatically take values // of variables in the C program with the same name. // __constant__ float v0, v1; __constant__ int array_size; __constant__ float* a; __constant__ float* b; extern __shared__ float s[]; // Shared memory for buffering a elements. __global__ void stencil(); __global__ void stencil_iter(); __global__ void stencil_shared(); __host__ cudaError_t kernels_get_attr(struct cudaFuncAttributes *attr, char **names) { // Return attributes of kernels, such as number of registers // used. cudaError_t er; CU_SYM(a); CU_SYM(b); CU_SYM(array_size); CU_SYM(v0); CU_SYM(v1); #define GETATTR(func) \ er = cudaFuncGetAttributes(attr,func); *names = #func; attr++; names++; \ if ( er ) return er; GETATTR(stencil); GETATTR(stencil_iter); GETATTR(stencil_shared); *names = NULL; return er; #undef GETATTR } __host__ void stencil_launch(dim3 dg, dim3 db, bool shared_kernel) { // Launch the kernel, using the provided configuration (block size, etc). // // stencil<<<dg,db>>>(); int shared_amt = (db.x + 2) * sizeof(float); if ( shared_kernel ) stencil_shared<<<dg,db,shared_amt>>>(); else stencil_iter<<<dg,db>>>(); } __global__ void stencil() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= array_size ) return; b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] ); } __global__ void stencil_iter() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_count = blockDim.x * gridDim.x; for ( int idx = tid; idx < array_size; idx += thread_count ) { b[idx] = v0 * a[idx] + v1 * ( a[idx-1] + a[idx+1] ); } } __global__ void stencil_shared() { // Compute a unique index (number) for this thread. // This will be used as an array index. // int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_count = blockDim.x * gridDim.x; int bl_idx = blockDim.x - 1; int sidx = threadIdx.x + 1; __shared__ float s[1024]; for ( int idx = tid; idx < array_size; idx += thread_count ) { // Load shared memory with a values. // float a_0 = a[idx]; s[sidx] = a_0; // Two lucky threads per block get to preload a second element. // if ( threadIdx.x == 0 ) s[0] = a[idx-1]; if ( threadIdx.x == bl_idx ) s[blockDim.x+1] = a[idx+1]; // Wait for everyone to finish. // // __syncthreads(); // Note that values read from a register or shared memory. // b[idx] = v0 * a_0 + v1 * ( s[sidx-1] + s[sidx+1] ); } }
35524ad48a19826b875137673fe46d77a29418bf.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // Questo algoritmo una modifica del Komura Equivalence (KE) che esegue le operazioni in due livelli (stage). // Inizialmente esegue le operazioni nel blocco usando la shared memory e poi merga le etichette sui bordi dei // blocchi. Varie prove hanno mostrato che sulla quadro va peggio della versione BUF. #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. // I will try to reduce it. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Risale alla radice dell'albero a partire da un suo nodo n __device__ unsigned Find(const int *s_buf, unsigned n) { // Attenzione: non invocare la find su un pixel di background while (s_buf[n] != n) { n = s_buf[n]; } return n; } // Unisce gli alberi contenenti i nodi a e b, collegandone le radici __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; __shared__ int buf[BLOCK_ROWS * BLOCK_COLS]; unsigned buf_index = threadIdx.y * BLOCK_COLS + threadIdx.x; if (row < labels.rows && col < labels.cols) { buf[buf_index] = buf_index; } __syncthreads(); if (row < labels.rows && col < labels.cols) { // 0|1 2|3 // --+---+-- // 4|A B| // 5|C D| // --+---+ unsigned char P = 0; if ((threadIdx.x > 0 || threadIdx.y > 0)) { if (img[img_index]) { P |= 0x37; // 00110111 } } if ((threadIdx.y > 0 || threadIdx.x < BLOCK_COLS - 1) && (col + 1 < img.cols)) { if (img[img_index + 1]) { P |= 0x0E; // 00001110 } } if ((threadIdx.x > 0) && (row + 1 < img.rows)) { if (img[img_index + img.step]) { P |= 0x30; // 00110000 } } if (threadIdx.x == 0) { P &= 0xCE; // 11001110 } if (col + 1 >= img.cols) { P &= 0xF3; // 11110011 } else if ((threadIdx.x + 1 == BLOCK_COLS) || (col + 2 >= img.cols)) { P &= 0xF7; // 11110111 } if (threadIdx.y == 0) { P &= 0xF0; // 11110000 } if (row + 1 >= img.rows) { P &= 0xDF; // 11011111 } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors if (P > 0) { if (HasBit(P, 0) && img[img_index - img.step - 1]) { Union(buf, buf_index, buf_index - BLOCK_COLS - 1); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { Union(buf, buf_index, buf_index - BLOCK_COLS); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { Union(buf, buf_index, buf_index + 1 - BLOCK_COLS); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) { Union(buf, buf_index, buf_index - 1); } } } __syncthreads(); // Local compression if (row < labels.rows 
&& col < labels.cols) { unsigned f = Find(buf, buf_index); unsigned f_row = f / BLOCK_COLS; unsigned f_col = f % BLOCK_COLS; unsigned global_f = 2 * (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + 2 * (blockIdx.x * BLOCK_COLS + f_col); labels.data[labels_index] = global_f; } } __global__ void GlobalMerge(cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char P = 0; if (((threadIdx.x == 0 && col > 0) || (threadIdx.y == 0 && row > 0))) { if (img[img_index]) { P |= 0x37; // 00110111 } } if (((threadIdx.y == 0 && row > 0) || (threadIdx.x == BLOCK_COLS - 1 && col + 2 < img.cols)) && (col + 1 < img.cols)) { if (img[img_index + 1]) { P |= 0x0E; // 00001110 } } if ((threadIdx.x == 0 && col > 0) && (row + 1 < img.rows)) { if (img[img_index + img.step]) { P |= 0x30; // 00110000 } } if (col == 0) { P &= 0xCE; // 11001110 } if (col + 1 >= img.cols) { P &= 0xF3; // 11110011 } else if (col + 2 >= img.cols) { P &= 0xF7; // 11110111 } if (row == 0) { P &= 0xF0; // 11110000 } if (row + 1 >= img.rows) { P &= 0xDF; // 11011111 } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors if (P > 0) { if (HasBit(P, 0) && img[img_index - img.step - 1]) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) { Union(labels.data, labels_index, labels_index - 2); } } } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { labels[labels_index] = Find(labels.data, labels_index); } } __global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned img_index = row * (img.step / img.elem_size) + col; if (row < labels.rows && col < labels.cols) { int label = labels[labels_index] + 1; if (img[img_index]) labels[labels_index] = label; else { labels[labels_index] = 0; } if (col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (row + 1 < labels.rows) { if (img[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class BKE_2S : public GpuLabeling2D<CONN_8> { private: dim3 
grid_size_; dim3 block_size_; unsigned char *last_pixel_; bool last_pixel_allocated_; public: BKE_2S() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); last_pixel_allocated_ = false; if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) { hipMalloc(&last_pixel_, sizeof(unsigned char)); last_pixel_allocated_ = true; } else { // last_pixel_ = d_img_labels_.data + d_img_labels_.step + sizeof(unsigned int); last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize(); } grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i local_labels; //cuda::GpuMat d_local_merge; //d_img_labels_.copyTo(d_local_merge); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_local_merge); //d_local_merge.download(local_labels); GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //d_img_labels_.download(img_labels_); if (last_pixel_allocated_) { hipFree(last_pixel_); } hipDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); } void Dealloc() { } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); // d_img_labels_.download(img_labels_); hipDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BKE_2S);
35524ad48a19826b875137673fe46d77a29418bf.cu
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // Questo algoritmo è una modifica del Komura Equivalence (KE) che esegue le operazioni in due livelli (stage). // Inizialmente esegue le operazioni nel blocco usando la shared memory e poi merga le etichette sui bordi dei // blocchi. Varie prove hanno mostrato che sulla quadro va peggio della versione BUF. #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. // I will try to reduce it. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Risale alla radice dell'albero a partire da un suo nodo n __device__ unsigned Find(const int *s_buf, unsigned n) { // Attenzione: non invocare la find su un pixel di background while (s_buf[n] != n) { n = s_buf[n]; } return n; } // Unisce gli alberi contenenti i nodi a e b, collegandone le radici __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; __shared__ int buf[BLOCK_ROWS * BLOCK_COLS]; unsigned buf_index = threadIdx.y * BLOCK_COLS + threadIdx.x; if (row < labels.rows && col < labels.cols) { buf[buf_index] = buf_index; } __syncthreads(); if (row < labels.rows && col < labels.cols) { // 0|1 2|3 // --+---+-- // 4|A B| // 5|C D| // --+---+ unsigned char P = 0; if ((threadIdx.x > 0 || threadIdx.y > 0)) { if (img[img_index]) { P |= 0x37; // 00110111 } } if ((threadIdx.y > 0 || threadIdx.x < BLOCK_COLS - 1) && (col + 1 < img.cols)) { if (img[img_index + 1]) { P |= 0x0E; // 00001110 } } if ((threadIdx.x > 0) && (row + 1 < img.rows)) { if (img[img_index + img.step]) { P |= 0x30; // 00110000 } } if (threadIdx.x == 0) { P &= 0xCE; // 11001110 } if (col + 1 >= img.cols) { P &= 0xF3; // 11110011 } else if ((threadIdx.x + 1 == BLOCK_COLS) || (col + 2 >= img.cols)) { P &= 0xF7; // 11110111 } if (threadIdx.y == 0) { P &= 0xF0; // 11110000 } if (row + 1 >= img.rows) { P &= 0xDF; // 11011111 } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors if (P > 0) { if (HasBit(P, 0) && img[img_index - img.step - 1]) { Union(buf, buf_index, buf_index - BLOCK_COLS - 1); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { Union(buf, buf_index, buf_index - BLOCK_COLS); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { Union(buf, buf_index, buf_index + 1 - BLOCK_COLS); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) { Union(buf, buf_index, buf_index - 1); } } } __syncthreads(); // Local compression if (row < labels.rows && col < labels.cols) { unsigned f = Find(buf, buf_index); 
unsigned f_row = f / BLOCK_COLS; unsigned f_col = f % BLOCK_COLS; unsigned global_f = 2 * (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + 2 * (blockIdx.x * BLOCK_COLS + f_col); labels.data[labels_index] = global_f; } } __global__ void GlobalMerge(cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char P = 0; if (((threadIdx.x == 0 && col > 0) || (threadIdx.y == 0 && row > 0))) { if (img[img_index]) { P |= 0x37; // 00110111 } } if (((threadIdx.y == 0 && row > 0) || (threadIdx.x == BLOCK_COLS - 1 && col + 2 < img.cols)) && (col + 1 < img.cols)) { if (img[img_index + 1]) { P |= 0x0E; // 00001110 } } if ((threadIdx.x == 0 && col > 0) && (row + 1 < img.rows)) { if (img[img_index + img.step]) { P |= 0x30; // 00110000 } } if (col == 0) { P &= 0xCE; // 11001110 } if (col + 1 >= img.cols) { P &= 0xF3; // 11110011 } else if (col + 2 >= img.cols) { P &= 0xF7; // 11110111 } if (row == 0) { P &= 0xF0; // 11110000 } if (row + 1 >= img.rows) { P &= 0xDF; // 11011111 } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors if (P > 0) { if (HasBit(P, 0) && img[img_index - img.step - 1]) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) { Union(labels.data, labels_index, labels_index - 2); } } } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { labels[labels_index] = Find(labels.data, labels_index); } } __global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned img_index = row * (img.step / img.elem_size) + col; if (row < labels.rows && col < labels.cols) { int label = labels[labels_index] + 1; if (img[img_index]) labels[labels_index] = label; else { labels[labels_index] = 0; } if (col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (row + 1 < labels.rows) { if (img[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class BKE_2S : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; unsigned char *last_pixel_; bool 
last_pixel_allocated_; public: BKE_2S() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); last_pixel_allocated_ = false; if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) { cudaMalloc(&last_pixel_, sizeof(unsigned char)); last_pixel_allocated_ = true; } else { // last_pixel_ = d_img_labels_.data + d_img_labels_.step + sizeof(unsigned int); last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize(); } grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i local_labels; //cuda::GpuMat d_local_merge; //d_img_labels_.copyTo(d_local_merge); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_local_merge); //d_local_merge.download(local_labels); GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //d_img_labels_.download(img_labels_); if (last_pixel_allocated_) { cudaFree(last_pixel_); } cudaDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); } void Dealloc() { } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); // d_img_labels_.download(img_labels_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BKE_2S);
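// The heart of the labeling kernels above is the lock-free union-find built on atomicMin:
// Find() walks the parent chain up to a root, and Union() repeatedly tries to hang the
// larger root under the smaller one, retrying when another thread wins the race. The
// standalone toy program below exercises just that pattern on a 1-D array (every element
// is merged with its left neighbour, so all roots collapse to 0). It is an illustration
// of the technique, not part of the algorithm; all names here are made up for the example.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__device__ unsigned FindRoot(const int *parent, unsigned n) {
    while (parent[n] != n) n = parent[n];        // climb to the root of n's tree
    return n;
}

__device__ void UnionRoots(int *parent, unsigned a, unsigned b) {
    bool done;
    do {
        a = FindRoot(parent, a);
        b = FindRoot(parent, b);
        if (a < b)      { int old = atomicMin(parent + b, a); done = (old == b); b = old; }
        else if (b < a) { int old = atomicMin(parent + a, b); done = (old == a); a = old; }
        else            { done = true; }         // already in the same tree
    } while (!done);
}

__global__ void InitParents(int *parent, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) parent[i] = i;                    // every element starts as its own root
}

__global__ void MergeWithLeft(int *parent, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 0 && i < n) UnionRoots(parent, i, i - 1);
}

int main() {
    const int n = 1 << 16;
    int *d_parent = nullptr;
    cudaMalloc(&d_parent, n * sizeof(int));
    InitParents<<<(n + 255) / 256, 256>>>(d_parent, n);
    MergeWithLeft<<<(n + 255) / 256, 256>>>(d_parent, n);

    std::vector<int> parent(n);
    cudaMemcpy(parent.data(), d_parent, n * sizeof(int), cudaMemcpyDeviceToHost);
    int r = n - 1;
    while (parent[r] != r) r = parent[r];        // chase parents on the host
    printf("root of element %d is %d (expected 0)\n", n - 1, r);
    cudaFree(d_parent);
    return 0;
}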
8e4eddc04d5d782ecae241b50b2993c729e394f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdint.h> #include <math.h> #include <algorithm> #include "backend/kernel_compiler/gpu/cuda_impl/resize_nearest_neighbor_impl.cuh" template <typename T> __global__ void ResizeNearestNeighbor(const int size, const T *input, const int s1, const int s2, const int s3, const int s4, T *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale) { // initialization // HalfPixelCenters false int input_pos; int pos_array[RESIZENEARESTNEIGHBOR_DIMENSION]; int in_height = s3; int in_width = s4; // for example 4-D: pos = pos_array[0] * output_shape[1] * output_shape[2] * output_shape[3] + // pos_array[1] * output_shape[2] * output_shape[3] + // pos_array[2] * output_shape[3] + // pos_array[3] int out_h; int out_w; for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) { pos_array[0] = pos / (d2 * d3 * d4) % d1; pos_array[1] = pos / (d3 * d4) % d2; pos_array[2] = pos / (d4) % d3; pos_array[3] = pos % d4; out_h = pos_array[2]; out_w = pos_array[3]; const int in_y = min((align_corners) ? static_cast<int>(roundf(out_h * h_scale)) : static_cast<int>(floorf(out_h * h_scale)), in_height - 1); const int in_x = min((align_corners) ? static_cast<int>(roundf(out_w * w_scale)) : static_cast<int>(floorf(out_w * w_scale)), in_width - 1); // pos_array[0] N, pos_array[1] C, in_y H, in_x W input_pos = pos_array[0] * s2 * s3 * s4 + pos_array[1] * s3 * s4 + in_y * s4 + in_x; output[pos] = input[input_pos]; } return; } template <typename T> void CalResizeNearestNeighbor(const int size, const T *input, const int s1, const int s2, const int s3, const int s4, T *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, hipStream_t cuda_stream) { hipLaunchKernelGGL(( ResizeNearestNeighbor), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, s1, s2, s3, s4, output, d1, d2, d3, d4, align_corners, h_scale, w_scale); return; } template void CalResizeNearestNeighbor<float>(const int size, const float *input, const int s1, const int s2, const int s3, const int s4, float *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, hipStream_t cuda_stream); template void CalResizeNearestNeighbor<half>(const int size, const half *input, const int s1, const int s2, const int s3, const int s4, half *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, hipStream_t cuda_stream); template void CalResizeNearestNeighbor<int>(const int size, const int *input, const int s1, const int s2, const int s3, const int s4, int *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, hipStream_t cuda_stream);
8e4eddc04d5d782ecae241b50b2993c729e394f6.cu
/** * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdint.h> #include <math.h> #include <algorithm> #include "backend/kernel_compiler/gpu/cuda_impl/resize_nearest_neighbor_impl.cuh" template <typename T> __global__ void ResizeNearestNeighbor(const int size, const T *input, const int s1, const int s2, const int s3, const int s4, T *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale) { // initialization // HalfPixelCenters false int input_pos; int pos_array[RESIZENEARESTNEIGHBOR_DIMENSION]; int in_height = s3; int in_width = s4; // for example 4-D: pos = pos_array[0] * output_shape[1] * output_shape[2] * output_shape[3] + // pos_array[1] * output_shape[2] * output_shape[3] + // pos_array[2] * output_shape[3] + // pos_array[3] int out_h; int out_w; for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) { pos_array[0] = pos / (d2 * d3 * d4) % d1; pos_array[1] = pos / (d3 * d4) % d2; pos_array[2] = pos / (d4) % d3; pos_array[3] = pos % d4; out_h = pos_array[2]; out_w = pos_array[3]; const int in_y = min((align_corners) ? static_cast<int>(roundf(out_h * h_scale)) : static_cast<int>(floorf(out_h * h_scale)), in_height - 1); const int in_x = min((align_corners) ? static_cast<int>(roundf(out_w * w_scale)) : static_cast<int>(floorf(out_w * w_scale)), in_width - 1); // pos_array[0] N, pos_array[1] C, in_y H, in_x W input_pos = pos_array[0] * s2 * s3 * s4 + pos_array[1] * s3 * s4 + in_y * s4 + in_x; output[pos] = input[input_pos]; } return; } template <typename T> void CalResizeNearestNeighbor(const int size, const T *input, const int s1, const int s2, const int s3, const int s4, T *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, cudaStream_t cuda_stream) { ResizeNearestNeighbor<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, s1, s2, s3, s4, output, d1, d2, d3, d4, align_corners, h_scale, w_scale); return; } template void CalResizeNearestNeighbor<float>(const int size, const float *input, const int s1, const int s2, const int s3, const int s4, float *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, cudaStream_t cuda_stream); template void CalResizeNearestNeighbor<half>(const int size, const half *input, const int s1, const int s2, const int s3, const int s4, half *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, cudaStream_t cuda_stream); template void CalResizeNearestNeighbor<int>(const int size, const int *input, const int s1, const int s2, const int s3, const int s4, int *output, const int d1, const int d2, const int d3, const int d4, bool align_corners, float h_scale, float w_scale, cudaStream_t cuda_stream);
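// CalResizeNearestNeighbor expects the caller to pass h_scale / w_scale already computed on
// the host. A hedged sketch of such a caller is below. The scale convention used here
// (align_corners ? (in - 1) / (out - 1) : in / out) is the common one for nearest-neighbor
// resize, but it is an assumption -- the wrapper that actually feeds this kernel is not part
// of this file -- and the helper names are made up for the example. It assumes the
// resize_nearest_neighbor_impl.cuh header (which declares CalResizeNearestNeighbor) is included.
static float ResizeScale(int in_size, int out_size, bool align_corners) {
  if (align_corners && out_size > 1) {
    return static_cast<float>(in_size - 1) / static_cast<float>(out_size - 1);
  }
  return static_cast<float>(in_size) / static_cast<float>(out_size);
}

void LaunchResizeNearestNeighborFloat(const float *d_input, float *d_output,
                                      int n, int c, int in_h, int in_w,
                                      int out_h, int out_w, bool align_corners,
                                      cudaStream_t stream) {
  // One thread index per element of the NCHW output tensor.
  const int size = n * c * out_h * out_w;
  const float h_scale = ResizeScale(in_h, out_h, align_corners);
  const float w_scale = ResizeScale(in_w, out_w, align_corners);
  // s1..s4 are the input dims and d1..d4 the output dims, matching the kernel above.
  CalResizeNearestNeighbor<float>(size, d_input, n, c, in_h, in_w,
                                  d_output, n, c, out_h, out_w,
                                  align_corners, h_scale, w_scale, stream);
}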
46344d34b2a71776b2314af43ea4738a29d32e5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/core/workspace.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/device/common_thrust.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename T, class CompareFunctor> struct SortFunctor { SortFunctor(int64_t CxS, int64_t S) : CxS_(CxS), S_(S) {} inline __device__ bool operator()( const thrust::tuple<int64_t, T>& lhs, const thrust::tuple<int64_t, T>& rhs) const { int64_t i = thrust::get<0>(lhs), j = thrust::get<0>(rhs); i = (i / CxS_) * S_ + i % S_; j = (j / CxS_) * S_ + j % S_; if (i != j) { return i < j; } else { return compare_functor_(thrust::get<1>(lhs), thrust::get<1>(rhs)); } } int64_t CxS_, S_; CompareFunctor compare_functor_; }; template <typename T, int ItemsPerThread> __global__ void _BlockSort( const int NxS, const int S, const int C, const int K, const bool largest, const T init, const T* x, T* y, int64_t* index) { typedef cub::BlockRadixSort<T, CUDA_THREADS, ItemsPerThread, int64_t> BlockSort; __shared__ typename BlockSort::TempStorage storage; CUDA_2D_KERNEL_LOOP1(i, NxS) { T keys[ItemsPerThread]; int64_t values[ItemsPerThread]; const int thread_offset = threadIdx.x * ItemsPerThread; const int x_offset = i / S * C * S + i % S; const int y_offset = i / S * K * S + i % S; #pragma unroll for (int j = 0; j < ItemsPerThread; ++j) { const int item_idx = thread_offset + j; values[j] = item_idx < C ? item_idx : C - 1; keys[j] = item_idx < C ? x[x_offset + item_idx * S] : init; } __syncthreads(); if (largest) { BlockSort(storage).SortDescending(keys, values); } else { BlockSort(storage).Sort(keys, values); } #pragma unroll for (int j = 0; j < ItemsPerThread; ++j) { if (thread_offset + j < K) { y[y_offset + (thread_offset + j) * S] = keys[j]; index[y_offset + (thread_offset + j) * S] = values[j]; } } } } template <typename T> void _DeviceSort( const int N, const int S, const int C, const int largest, T* key, int64_t* value, CUDAContext* ctx) { const auto NxS = N * S; const auto NxSxC = NxS * C; auto policy = thrust::hip::par.on(ctx->cuda_stream()); thrust::sequence(policy, value, value + NxSxC); if (NxS == 1) { if (largest > 0) { thrust::sort_by_key( policy, key, key + NxSxC, value, math::GreaterFunctor<T>()); } else { thrust::sort_by_key( policy, key, key + NxSxC, value, math::LessFunctor<T>()); } } else { auto kv = thrust::make_zip_iterator(thrust::make_tuple(value, key)); if (largest > 0) { thrust::sort( policy, kv, kv + NxSxC, SortFunctor<T, math::GreaterFunctor<T>>(C * S, S)); } else { thrust::sort( policy, kv, kv + NxSxC, SortFunctor<T, math::LessFunctor<T>>(C * S, S)); } } } template <typename T> __global__ void _GetTopK( const int NxKxS, const int S, const int C, const int K, const T* key, const int64_t* value, T* y, int64_t* index) { CUDA_1D_KERNEL_LOOP(yi, NxKxS) { const int xi = ((yi / S / K) * S + yi % S) * C + (yi / S) % K; y[yi] = key[xi]; index[yi] = value[xi] / S % C; } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_BLOCKSORT_KERNEL(T, kItemsPerThread) \ hipLaunchKernelGGL(( _BlockSort<T, kItemsPerThread>) \ , dim3(NxS), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ NxS, \ S, \ C, \ K, \ largest > 0, \ init, \ reinterpret_cast<const T*>(x), \ reinterpret_cast<T*>(value), \ index) #define DEFINE_KERNEL_LAUNCHER(T, kLowest, kMax) \ template <> \ void 
TopK<T, CUDAContext>( \ const int N, \ const int S, \ const int C, \ const int K, \ const int largest, \ const T* x, \ T* value, \ int64_t* index, \ CUDAContext* ctx) { \ using ScalarT = math::ScalarType<T>::type; \ const int NxS = N * S; \ if (NxS == 1 || C > CUDA_THREADS * 8) { \ const auto NxCxS = N * C * S; \ const auto NxKxS = N * K * S; \ auto data = ctx->workspace()->template data<CUDAContext>( \ {NxCxS * sizeof(T), NxCxS * sizeof(int64_t)}, "data:1"); \ math::Copy(NxCxS, x, (T*)data[0], ctx); \ _DeviceSort( \ N, \ S, \ C, \ largest, \ reinterpret_cast<ScalarT*>(data[0]), \ reinterpret_cast<int64_t*>(data[1]), \ ctx); \ if (NxS == 1) { \ math::Copy(NxKxS, (T*)data[0], value, ctx); \ math::Copy(NxKxS, (int64_t*)data[1], index, ctx); \ } else { \ hipLaunchKernelGGL(( _GetTopK), dim3(CUDA_BLOCKS(NxKxS)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ NxKxS, S, C, K, (T*)data[0], (int64_t*)data[1], value, index); \ } \ return; \ } \ ScalarT init = largest > 0 ? kLowest : kMax; \ if (C <= CUDA_THREADS) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 1); \ } else if (C <= CUDA_THREADS * 2) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 2); \ } else if (C <= CUDA_THREADS * 4) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 4); \ } else if (C <= CUDA_THREADS * 8) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 8); \ } else { \ LOG(FATAL) << "Too larger dimension (> " << CUDA_THREADS * 8 \ << ") to launch the cuda kernel"; \ } \ } DEFINE_KERNEL_LAUNCHER( uint8_t, std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max()); DEFINE_KERNEL_LAUNCHER( int8_t, std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max()); DEFINE_KERNEL_LAUNCHER( int, std::numeric_limits<int>::lowest(), std::numeric_limits<int>::max()); DEFINE_KERNEL_LAUNCHER( int64_t, std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max()); DEFINE_KERNEL_LAUNCHER( float16, cub::Traits<half>::Lowest(), cub::Traits<half>::Max()); DEFINE_KERNEL_LAUNCHER( float, std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max()); DEFINE_KERNEL_LAUNCHER( double, std::numeric_limits<double>::lowest(), std::numeric_limits<double>::max()); #undef DISPATCH_BLOCKSORT_KERNEL #undef DEFINE_KERNEL_LAUNCHER } // namespace kernels } // namespace dragon #endif // USE_ROCM
46344d34b2a71776b2314af43ea4738a29d32e5f.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/core/workspace.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/device/common_thrust.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename T, class CompareFunctor> struct SortFunctor { SortFunctor(int64_t CxS, int64_t S) : CxS_(CxS), S_(S) {} inline __device__ bool operator()( const thrust::tuple<int64_t, T>& lhs, const thrust::tuple<int64_t, T>& rhs) const { int64_t i = thrust::get<0>(lhs), j = thrust::get<0>(rhs); i = (i / CxS_) * S_ + i % S_; j = (j / CxS_) * S_ + j % S_; if (i != j) { return i < j; } else { return compare_functor_(thrust::get<1>(lhs), thrust::get<1>(rhs)); } } int64_t CxS_, S_; CompareFunctor compare_functor_; }; template <typename T, int ItemsPerThread> __global__ void _BlockSort( const int NxS, const int S, const int C, const int K, const bool largest, const T init, const T* x, T* y, int64_t* index) { typedef cub::BlockRadixSort<T, CUDA_THREADS, ItemsPerThread, int64_t> BlockSort; __shared__ typename BlockSort::TempStorage storage; CUDA_2D_KERNEL_LOOP1(i, NxS) { T keys[ItemsPerThread]; int64_t values[ItemsPerThread]; const int thread_offset = threadIdx.x * ItemsPerThread; const int x_offset = i / S * C * S + i % S; const int y_offset = i / S * K * S + i % S; #pragma unroll for (int j = 0; j < ItemsPerThread; ++j) { const int item_idx = thread_offset + j; values[j] = item_idx < C ? item_idx : C - 1; keys[j] = item_idx < C ? x[x_offset + item_idx * S] : init; } __syncthreads(); if (largest) { BlockSort(storage).SortDescending(keys, values); } else { BlockSort(storage).Sort(keys, values); } #pragma unroll for (int j = 0; j < ItemsPerThread; ++j) { if (thread_offset + j < K) { y[y_offset + (thread_offset + j) * S] = keys[j]; index[y_offset + (thread_offset + j) * S] = values[j]; } } } } template <typename T> void _DeviceSort( const int N, const int S, const int C, const int largest, T* key, int64_t* value, CUDAContext* ctx) { const auto NxS = N * S; const auto NxSxC = NxS * C; auto policy = thrust::cuda::par.on(ctx->cuda_stream()); thrust::sequence(policy, value, value + NxSxC); if (NxS == 1) { if (largest > 0) { thrust::sort_by_key( policy, key, key + NxSxC, value, math::GreaterFunctor<T>()); } else { thrust::sort_by_key( policy, key, key + NxSxC, value, math::LessFunctor<T>()); } } else { auto kv = thrust::make_zip_iterator(thrust::make_tuple(value, key)); if (largest > 0) { thrust::sort( policy, kv, kv + NxSxC, SortFunctor<T, math::GreaterFunctor<T>>(C * S, S)); } else { thrust::sort( policy, kv, kv + NxSxC, SortFunctor<T, math::LessFunctor<T>>(C * S, S)); } } } template <typename T> __global__ void _GetTopK( const int NxKxS, const int S, const int C, const int K, const T* key, const int64_t* value, T* y, int64_t* index) { CUDA_1D_KERNEL_LOOP(yi, NxKxS) { const int xi = ((yi / S / K) * S + yi % S) * C + (yi / S) % K; y[yi] = key[xi]; index[yi] = value[xi] / S % C; } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_BLOCKSORT_KERNEL(T, kItemsPerThread) \ _BlockSort<T, kItemsPerThread> \ <<<NxS, CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ NxS, \ S, \ C, \ K, \ largest > 0, \ init, \ reinterpret_cast<const T*>(x), \ reinterpret_cast<T*>(value), \ index) #define DEFINE_KERNEL_LAUNCHER(T, kLowest, kMax) \ template <> \ void TopK<T, CUDAContext>( \ const int N, \ const int S, \ const int C, \ const int K, \ const int largest, \ const T* x, \ 
T* value, \ int64_t* index, \ CUDAContext* ctx) { \ using ScalarT = math::ScalarType<T>::type; \ const int NxS = N * S; \ if (NxS == 1 || C > CUDA_THREADS * 8) { \ const auto NxCxS = N * C * S; \ const auto NxKxS = N * K * S; \ auto data = ctx->workspace()->template data<CUDAContext>( \ {NxCxS * sizeof(T), NxCxS * sizeof(int64_t)}, "data:1"); \ math::Copy(NxCxS, x, (T*)data[0], ctx); \ _DeviceSort( \ N, \ S, \ C, \ largest, \ reinterpret_cast<ScalarT*>(data[0]), \ reinterpret_cast<int64_t*>(data[1]), \ ctx); \ if (NxS == 1) { \ math::Copy(NxKxS, (T*)data[0], value, ctx); \ math::Copy(NxKxS, (int64_t*)data[1], index, ctx); \ } else { \ _GetTopK<<<CUDA_BLOCKS(NxKxS), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ NxKxS, S, C, K, (T*)data[0], (int64_t*)data[1], value, index); \ } \ return; \ } \ ScalarT init = largest > 0 ? kLowest : kMax; \ if (C <= CUDA_THREADS) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 1); \ } else if (C <= CUDA_THREADS * 2) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 2); \ } else if (C <= CUDA_THREADS * 4) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 4); \ } else if (C <= CUDA_THREADS * 8) { \ DISPATCH_BLOCKSORT_KERNEL(ScalarT, 8); \ } else { \ LOG(FATAL) << "Too larger dimension (> " << CUDA_THREADS * 8 \ << ") to launch the cuda kernel"; \ } \ } DEFINE_KERNEL_LAUNCHER( uint8_t, std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max()); DEFINE_KERNEL_LAUNCHER( int8_t, std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max()); DEFINE_KERNEL_LAUNCHER( int, std::numeric_limits<int>::lowest(), std::numeric_limits<int>::max()); DEFINE_KERNEL_LAUNCHER( int64_t, std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max()); DEFINE_KERNEL_LAUNCHER( float16, cub::Traits<half>::Lowest(), cub::Traits<half>::Max()); DEFINE_KERNEL_LAUNCHER( float, std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max()); DEFINE_KERNEL_LAUNCHER( double, std::numeric_limits<double>::lowest(), std::numeric_limits<double>::max()); #undef DISPATCH_BLOCKSORT_KERNEL #undef DEFINE_KERNEL_LAUNCHER } // namespace kernels } // namespace dragon #endif // USE_CUDA
7abfada59877cdda97efc3d9e210974084f5b490.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include <cfloat> namespace fastgm { namespace likelihood_kernel { const int BLOCK_SIZE = 256; const int BLOCK_WIDTH = 64; const int BLOCK_HEIGHT = 4; __device__ float safe_log(float x) { return x < 1e-9 ? -FLT_MAX : log(x); } /** * Compute the local probabilities for each node, storing them in the pot matrix */ template <class Grid> __global__ void pseudo_prob(Grid g, device_matrix<float> uf, device_matrix<float> pf, device_array<float> w, device_array<int> labeling, device_matrix<float> pot) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= 0 && y >= 0 && x < g.width() && y < g.height()) { int i = y * g.width() + x; // Compute probability of each local configuration as given in this labeling float sum = 0; for (int j = 0; j < g.num_labels(); j++) { sum += pot(i, j) = expf(g(uf.row(i), w, i, j) + (i < g.width() ? 0 : g(pf.row(i), w, g.edge_between(i - g.width(), i), j, labeling[i - g.width()])) + (i % g.width() == 0 ? 0 : g(pf.row(i), w, g.edge_between(i - 1, i), j, labeling[i - 1])) + (x % g.width() == g.width() - 1 ? 0 : g(pf.row(i), w, g.edge_between(i, i + 1), j, labeling[i + 1])) + (i >= g.width() * (g.height() - 1) ? 0 : g(pf.row(i), w, g.edge_between(i, i + g.width()), j, labeling[i + g.width()]))); } // Normalize the potentials for (int j = 0; j < g.num_labels(); j++) { pot(i, j) /= sum; } } } /** * Compute the unary probabilities for each node, storing them in the pot matrix */ template <class Grid> __global__ void unary_prob(const Grid g, device_matrix<float> uf, device_array<float> w, device_matrix<float> pot) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.size()) { // Compute probability of each node at each label float sum = 0; for (int j = 0; j < g.num_labels(); j++) { sum += pot(i, j) = expf(g(uf.row(i), w, i, j)); } // Normalize the potentials for (int j = 0; j < g.num_labels(); j++) { pot(i, j) /= sum; } } } /** * Compute the pairwise probabilities for each edge, storing them in the pot matrix */ template <class Grid> __global__ void pairwise_prob(Grid g, device_matrix<float> pf, device_array<float> w, device_matrix<float> pot) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.num_edges()) { grid_edge edge = g.edge(i); // Compute probability of each edge at each label float sum = 0; for (int p = 0; p < g.num_labels(); p++) { for (int q = 0; q < g.num_labels(); q++) { sum += pot(i, p * g.num_labels() + q) /*= pot(i, q * g.num_labels() + p)*/ = expf(g(pf.row(i), w, edge, p, q)); } } // Normalize the potentials for (int p = 0; p < g.num_labels(); p++) { for (int q = 0; q < g.num_labels(); q++) { pot(i, p * g.num_labels() + q) /= sum; } } } } /** * Compute the gradients for unary parameters, separately for each node * to prevent data races. */ template <class Grid> __global__ void compute_node_gradient(Grid g, device_matrix<float> grad, device_matrix<float> pot, device_matrix<float> uf, device_array<int> labeling) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.size()) { for (int f = 0; f < uf.cols(); f++) grad(f, i) = 0; for (int x = 0; x < g.num_labels(); x++) { g.unary().compute_features(uf.row(i), i, x); float l = pot(i, x) - (x == labeling[i]); for (int f = 0; f < uf.cols(); f++) { grad(f, i) += uf(i, f) * l; } } } } /** * Compute the gradients for pairwise parameters for pseudolikelihood, * separately for each edge to prevent data races. 
*/ template <class Grid> __global__ void compute_edge_gradient_pseudo(Grid g, device_matrix<float> grad, device_matrix<float> pot, device_matrix<float> pf, device_array<int> labeling) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.num_edges()) { grid_edge edge = g.edge(i); for (int f = 0; f < pf.cols(); f++) grad(f, i) = 0; for (int p = 0; p < g.num_labels(); p++) { g.pairwise().compute_features(pf.row(i), edge, p, labeling[edge.second()]); float l = pot(edge.first(), p) - (p == labeling[edge.first()]); for (int f = 0; f < pf.cols(); f++) { grad(f, i) += pf(i, f) * l; } g.pairwise().compute_features(pf.row(i), edge, p, labeling[edge.first()]); l = pot(edge.second(), p) - (p == labeling[edge.second()]); for (int f = 0; f < pf.cols(); f++) { grad(f, i) += pf(i, f) * l; } } } } template <class Grid> __global__ void compute_edge_gradient(Grid g, device_matrix<float> grad, device_matrix<float> pot, device_matrix<float> pf, device_array<int> labeling) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.num_edges()) { grid_edge edge = g.edge(i); for (int f = 0; f < pf.cols(); f++) grad(f, i) = 0; for (int p = 0; p < g.num_labels(); p++) { for (int q = 0; q <= p; q++) { g.pairwise().compute_features(pf.row(i), edge, p, q); float l = pot(i, p * g.num_labels() + q) - ((p == labeling[edge.first()] && q == labeling[edge.second()]) || (p == labeling[edge.second()] && q == labeling[edge.first()])); for (int f = 0; f < pf.cols(); f++) { grad(f, i) += pf(i, f) * l; } } } } } /** * Compute the sum of the columns in each row of a matrix, storing them in * a vector. */ __global__ void sum_cols(device_matrix<float> m, int offset, device_array<float> result) { // TODO: could be implemented as a reduce operation int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < m.rows()) { for (int c = 0; c < m.cols(); c++) { result[i + offset] += m(i, c); } } } } }
7abfada59877cdda97efc3d9e210974084f5b490.cu
#pragma once #include <cfloat> namespace fastgm { namespace likelihood_kernel { const int BLOCK_SIZE = 256; const int BLOCK_WIDTH = 64; const int BLOCK_HEIGHT = 4; __device__ float safe_log(float x) { return x < 1e-9 ? -FLT_MAX : log(x); } /** * Compute the local probabilities for each node, storing them in the pot matrix */ template <class Grid> __global__ void pseudo_prob(Grid g, device_matrix<float> uf, device_matrix<float> pf, device_array<float> w, device_array<int> labeling, device_matrix<float> pot) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= 0 && y >= 0 && x < g.width() && y < g.height()) { int i = y * g.width() + x; // Compute probability of each local configuration as given in this labeling float sum = 0; for (int j = 0; j < g.num_labels(); j++) { sum += pot(i, j) = expf(g(uf.row(i), w, i, j) + (i < g.width() ? 0 : g(pf.row(i), w, g.edge_between(i - g.width(), i), j, labeling[i - g.width()])) + (i % g.width() == 0 ? 0 : g(pf.row(i), w, g.edge_between(i - 1, i), j, labeling[i - 1])) + (x % g.width() == g.width() - 1 ? 0 : g(pf.row(i), w, g.edge_between(i, i + 1), j, labeling[i + 1])) + (i >= g.width() * (g.height() - 1) ? 0 : g(pf.row(i), w, g.edge_between(i, i + g.width()), j, labeling[i + g.width()]))); } // Normalize the potentials for (int j = 0; j < g.num_labels(); j++) { pot(i, j) /= sum; } } } /** * Compute the unary probabilities for each node, storing them in the pot matrix */ template <class Grid> __global__ void unary_prob(const Grid g, device_matrix<float> uf, device_array<float> w, device_matrix<float> pot) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.size()) { // Compute probability of each node at each label float sum = 0; for (int j = 0; j < g.num_labels(); j++) { sum += pot(i, j) = expf(g(uf.row(i), w, i, j)); } // Normalize the potentials for (int j = 0; j < g.num_labels(); j++) { pot(i, j) /= sum; } } } /** * Compute the pairwise probabilities for each edge, storing them in the pot matrix */ template <class Grid> __global__ void pairwise_prob(Grid g, device_matrix<float> pf, device_array<float> w, device_matrix<float> pot) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.num_edges()) { grid_edge edge = g.edge(i); // Compute probability of each edge at each label float sum = 0; for (int p = 0; p < g.num_labels(); p++) { for (int q = 0; q < g.num_labels(); q++) { sum += pot(i, p * g.num_labels() + q) /*= pot(i, q * g.num_labels() + p)*/ = expf(g(pf.row(i), w, edge, p, q)); } } // Normalize the potentials for (int p = 0; p < g.num_labels(); p++) { for (int q = 0; q < g.num_labels(); q++) { pot(i, p * g.num_labels() + q) /= sum; } } } } /** * Compute the gradients for unary parameters, separately for each node * to prevent data races. */ template <class Grid> __global__ void compute_node_gradient(Grid g, device_matrix<float> grad, device_matrix<float> pot, device_matrix<float> uf, device_array<int> labeling) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.size()) { for (int f = 0; f < uf.cols(); f++) grad(f, i) = 0; for (int x = 0; x < g.num_labels(); x++) { g.unary().compute_features(uf.row(i), i, x); float l = pot(i, x) - (x == labeling[i]); for (int f = 0; f < uf.cols(); f++) { grad(f, i) += uf(i, f) * l; } } } } /** * Compute the gradients for pairwise parameters for pseudolikelihood, * separately for each edge to prevent data races. 
*/ template <class Grid> __global__ void compute_edge_gradient_pseudo(Grid g, device_matrix<float> grad, device_matrix<float> pot, device_matrix<float> pf, device_array<int> labeling) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.num_edges()) { grid_edge edge = g.edge(i); for (int f = 0; f < pf.cols(); f++) grad(f, i) = 0; for (int p = 0; p < g.num_labels(); p++) { g.pairwise().compute_features(pf.row(i), edge, p, labeling[edge.second()]); float l = pot(edge.first(), p) - (p == labeling[edge.first()]); for (int f = 0; f < pf.cols(); f++) { grad(f, i) += pf(i, f) * l; } g.pairwise().compute_features(pf.row(i), edge, p, labeling[edge.first()]); l = pot(edge.second(), p) - (p == labeling[edge.second()]); for (int f = 0; f < pf.cols(); f++) { grad(f, i) += pf(i, f) * l; } } } } template <class Grid> __global__ void compute_edge_gradient(Grid g, device_matrix<float> grad, device_matrix<float> pot, device_matrix<float> pf, device_array<int> labeling) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < g.num_edges()) { grid_edge edge = g.edge(i); for (int f = 0; f < pf.cols(); f++) grad(f, i) = 0; for (int p = 0; p < g.num_labels(); p++) { for (int q = 0; q <= p; q++) { g.pairwise().compute_features(pf.row(i), edge, p, q); float l = pot(i, p * g.num_labels() + q) - ((p == labeling[edge.first()] && q == labeling[edge.second()]) || (p == labeling[edge.second()] && q == labeling[edge.first()])); for (int f = 0; f < pf.cols(); f++) { grad(f, i) += pf(i, f) * l; } } } } } /** * Compute the sum of the columns in each row of a matrix, storing them in * a vector. */ __global__ void sum_cols(device_matrix<float> m, int offset, device_array<float> result) { // TODO: could be implemented as a reduce operation int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < m.rows()) { for (int c = 0; c < m.cols(); c++) { result[i + offset] += m(i, c); } } } } }
fcce4734a186c2db026007f02efee9c318f398f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Mandelbrot.h" #include <iostream> #include <assert.h> #include "Device.h" #include <assert.h> #include "DomaineMath_GPU.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void mandel(uchar4* ptrDevPixels,uint w, uint h,float t,DomaineMath domaineMath); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, float dt,const DomaineMath& domaineMath) : Animable_I<uchar4>(grid, w, h, "Mandelbrot_Cuda_RGBA_uchar4",domaineMath), variateurAnimation(Interval<float>(20, 120), dt) { assert(w == h); // specific rippling // Inputs this->dt = dt; // Tools this->t = 0; // protected dans Animable } Mandelbrot::~Mandelbrot() { // rien } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API * * Note : domaineMath pas use car pas zoomable */ void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Device::lastCudaError("rippling rgba uchar4 (before kernel)"); // facultatif, for debug only, remove for release // TODO lancer le kernel avec <<<dg,db>>> // le kernel est importer ci-dessus (ligne 19) float t=variateurAnimation.get(); hipLaunchKernelGGL(( mandel), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t,domaineMath); Device::lastCudaError("rippling rgba uchar4 (after kernel)"); // facultatif, for debug only, remove for release } /** * Override * Call periodicly by the API */ void Mandelbrot::animationStep() { t += variateurAnimation.varierAndGet(); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
fcce4734a186c2db026007f02efee9c318f398f0.cu
#include "Mandelbrot.h" #include <iostream> #include <assert.h> #include "Device.h" #include <assert.h> #include "DomaineMath_GPU.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void mandel(uchar4* ptrDevPixels,uint w, uint h,float t,DomaineMath domaineMath); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Mandelbrot::Mandelbrot(const Grid& grid, uint w, uint h, float dt,const DomaineMath& domaineMath) : Animable_I<uchar4>(grid, w, h, "Mandelbrot_Cuda_RGBA_uchar4",domaineMath), variateurAnimation(Interval<float>(20, 120), dt) { assert(w == h); // specific rippling // Inputs this->dt = dt; // Tools this->t = 0; // protected dans Animable } Mandelbrot::~Mandelbrot() { // rien } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API * * Note : domaineMath pas use car pas zoomable */ void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Device::lastCudaError("rippling rgba uchar4 (before kernel)"); // facultatif, for debug only, remove for release // TODO lancer le kernel avec <<<dg,db>>> // le kernel est importer ci-dessus (ligne 19) float t=variateurAnimation.get(); mandel<<<dg,db>>> (ptrDevPixels,w,h,t,domaineMath); Device::lastCudaError("rippling rgba uchar4 (after kernel)"); // facultatif, for debug only, remove for release } /** * Override * Call periodicly by the API */ void Mandelbrot::animationStep() { t += variateurAnimation.varierAndGet(); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
c565793113f5f85620d50d1648652fc7de5ab9b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <omp.h> #include <string> #include <math.h> #include "utils.h" #define BLOCK_DIM 32 __constant__ double h2_d; /* Initialize f. */ void init_f(double *f, long N) { #pragma omp parallel for schedule(static) for (long i = 0; i < N * N; i++) f[i] = 1.0; } /* Initialize u. */ void init_u(double *u, long N) { #pragma omp parallel for schedule(static) for (long i = 0; i < N * N; i++) u[i] = 0; } /* Copy array a to b. */ void cp_arr(double *a, double *b, long L) { for (long i = 0; i < L; i++) { b[i] = a[i]; } } /* CPU reference */ void jacobi2d(double *u, double *f, long N, long n_itr) { long M = N + 2; double h2 = 1.0 / ((N + 1) * (N + 1)); double *u_tmp = (double *)malloc(M * M * sizeof(double)); cp_arr(u, u_tmp, M * M); for (long k = 0; k < n_itr; k++) { // swap the pointers double *tmp = u; u = u_tmp; u_tmp = tmp; #pragma omp parallel for collapse(2) shared(u, u_tmp, N, M, h2, f) for (long i = 1; i <= N; i++) { for (long j = 1; j <= N; j++) { u[i * M + j] = 0.25 * (h2 * f[(i - 1) * N + (j - 1)] + u_tmp[(i - 1) * M + j] + u_tmp[i * M + j - 1] + u_tmp[(i + 1) * M + j] + u_tmp[i * M + j + 1]); } } } free(u_tmp); } void get_u_ref(double* u_ref, double* u_ref1, long N) { long M = N + 2; #pragma omp parallel for collapse(2) for (long i = 0; i < N; i++) { for (long j = 0; j < N; j++) { u_ref[i * N + j] = u_ref1[(i+1) * M + (j+1)]; } } } /* Jacobi GPU kernel */ __global__ void jacobi_kernel(double* u, double* u_tmp, const double* f_d, long N) { __shared__ double smem[BLOCK_DIM+2][BLOCK_DIM+2]; long offset_x = blockIdx.x * (BLOCK_DIM-2); long offset_y = blockIdx.y * (BLOCK_DIM-2); smem[threadIdx.x][threadIdx.y] = 0; if (offset_x + threadIdx.x < N && offset_y + threadIdx.y < N) smem[threadIdx.x][threadIdx.y] = u_tmp[(offset_x + threadIdx.x)*N + (offset_y + threadIdx.y)]; __syncthreads(); double sum; sum = 0.25 * (h2_d * f_d[(offset_x + threadIdx.x)*N + (offset_y + threadIdx.y)] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y] + smem[threadIdx.x][threadIdx.y-1] + smem[threadIdx.x][threadIdx.y+1]); if (threadIdx.x+2 < BLOCK_DIM && threadIdx.y+2 < BLOCK_DIM) if (offset_x+threadIdx.x+2 <= N && offset_y+threadIdx.y+2 <= N) u[(offset_x+threadIdx.x)*N + (offset_y+threadIdx.y)] = sum; } int main(int argc, char const *argv[]) { Timer t; long n_itr = 10000; long N = 1000; double *u; hipHostMalloc((void**)&u, N * N * sizeof(double)); double *f; hipHostMalloc((void**)&f, N * N * sizeof(double)); init_u(u, N); init_f(f, N); /* CPU reference */ double *u_ref, *u_ref1; long M = N + 2; hipHostMalloc((void**)&u_ref, N * N * sizeof(double)); hipHostMalloc((void**)&u_ref1, M * M * sizeof(double)); init_u(u_ref, N); init_u(u_ref1, M); t.tic(); jacobi2d(u_ref1, f, N, n_itr); double tt = t.toc(); printf("CPU time = %f s\n", tt); printf("CPU flops = %f GFlop/s\n", n_itr* 2*(N-2)*(N-2)*4/tt*1e-9); get_u_ref(u_ref, u_ref1, N); hipHostFree(u_ref1); /* Allocate GPU memory */ double *u_d, *u_tmp, *f_d; hipMalloc(&u_d, N * N * sizeof(double)); hipMalloc(&u_tmp, N * N * sizeof(double)); hipMalloc(&f_d, N * N * sizeof(double)); /* Copy data to GPU */ hipMemcpy(u_d, u, N * N * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(u_tmp, u, N * N * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(f_d, f, N * N * sizeof(double), hipMemcpyHostToDevice); double h2 = 1.0 / ((N + 1) * (N + 1)); hipMemcpyToSymbol(&h2_d, &h2, sizeof(double)); dim3 blockDim(BLOCK_DIM, 
BLOCK_DIM); dim3 gridDim(N/(BLOCK_DIM-2)+1, N/(BLOCK_DIM-2)+1); hipLaunchKernelGGL(( jacobi_kernel), dim3(gridDim),dim3(blockDim), 0, 0, u_d, u_tmp, f_d, N); /* Iteration on GPU */ hipDeviceSynchronize(); t.tic(); for (long i = 0; i < n_itr; i++) { hipLaunchKernelGGL(( jacobi_kernel), dim3(gridDim),dim3(blockDim), 0, 0, u_d, u_tmp, f_d, N); /* swap pointers */ double* p_tmp = u_d; u_d = u_tmp; u_tmp = p_tmp; } hipDeviceSynchronize(); tt = t.toc(); printf("GPU time = %f s\n", tt); printf("GPU flops = %f GFlop/s\n", n_itr* 2*(N-2)*(N-2)*4/tt*1e-9); /* Copy data back to host */ hipMemcpy(u, u_d, N * N * sizeof(double), hipMemcpyDeviceToHost); /* Print error */ // double err = 0; // for (long i = 0; i < N*N; i++) err = ::max(err, fabs(u[i]-u_ref[i])); // printf("Error = %e\n", err); /* Free memory */ hipHostFree(u); hipHostFree(f); hipHostFree(u_ref); hipFree(u_d); hipFree(u_tmp); hipFree(f_d); return 0; }
c565793113f5f85620d50d1648652fc7de5ab9b9.cu
#include <stdio.h> #include <stdlib.h> #include <algorithm> #include <omp.h> #include <string> #include <math.h> #include "utils.h" #define BLOCK_DIM 32 __constant__ double h2_d; /* Initialize f. */ void init_f(double *f, long N) { #pragma omp parallel for schedule(static) for (long i = 0; i < N * N; i++) f[i] = 1.0; } /* Initialize u. */ void init_u(double *u, long N) { #pragma omp parallel for schedule(static) for (long i = 0; i < N * N; i++) u[i] = 0; } /* Copy array a to b. */ void cp_arr(double *a, double *b, long L) { for (long i = 0; i < L; i++) { b[i] = a[i]; } } /* CPU reference */ void jacobi2d(double *u, double *f, long N, long n_itr) { long M = N + 2; double h2 = 1.0 / ((N + 1) * (N + 1)); double *u_tmp = (double *)malloc(M * M * sizeof(double)); cp_arr(u, u_tmp, M * M); for (long k = 0; k < n_itr; k++) { // swap the pointers double *tmp = u; u = u_tmp; u_tmp = tmp; #pragma omp parallel for collapse(2) shared(u, u_tmp, N, M, h2, f) for (long i = 1; i <= N; i++) { for (long j = 1; j <= N; j++) { u[i * M + j] = 0.25 * (h2 * f[(i - 1) * N + (j - 1)] + u_tmp[(i - 1) * M + j] + u_tmp[i * M + j - 1] + u_tmp[(i + 1) * M + j] + u_tmp[i * M + j + 1]); } } } free(u_tmp); } void get_u_ref(double* u_ref, double* u_ref1, long N) { long M = N + 2; #pragma omp parallel for collapse(2) for (long i = 0; i < N; i++) { for (long j = 0; j < N; j++) { u_ref[i * N + j] = u_ref1[(i+1) * M + (j+1)]; } } } /* Jacobi GPU kernel */ __global__ void jacobi_kernel(double* u, double* u_tmp, const double* f_d, long N) { __shared__ double smem[BLOCK_DIM+2][BLOCK_DIM+2]; long offset_x = blockIdx.x * (BLOCK_DIM-2); long offset_y = blockIdx.y * (BLOCK_DIM-2); smem[threadIdx.x][threadIdx.y] = 0; if (offset_x + threadIdx.x < N && offset_y + threadIdx.y < N) smem[threadIdx.x][threadIdx.y] = u_tmp[(offset_x + threadIdx.x)*N + (offset_y + threadIdx.y)]; __syncthreads(); double sum; sum = 0.25 * (h2_d * f_d[(offset_x + threadIdx.x)*N + (offset_y + threadIdx.y)] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y] + smem[threadIdx.x][threadIdx.y-1] + smem[threadIdx.x][threadIdx.y+1]); if (threadIdx.x+2 < BLOCK_DIM && threadIdx.y+2 < BLOCK_DIM) if (offset_x+threadIdx.x+2 <= N && offset_y+threadIdx.y+2 <= N) u[(offset_x+threadIdx.x)*N + (offset_y+threadIdx.y)] = sum; } int main(int argc, char const *argv[]) { Timer t; long n_itr = 10000; long N = 1000; double *u; cudaMallocHost((void**)&u, N * N * sizeof(double)); double *f; cudaMallocHost((void**)&f, N * N * sizeof(double)); init_u(u, N); init_f(f, N); /* CPU reference */ double *u_ref, *u_ref1; long M = N + 2; cudaMallocHost((void**)&u_ref, N * N * sizeof(double)); cudaMallocHost((void**)&u_ref1, M * M * sizeof(double)); init_u(u_ref, N); init_u(u_ref1, M); t.tic(); jacobi2d(u_ref1, f, N, n_itr); double tt = t.toc(); printf("CPU time = %f s\n", tt); printf("CPU flops = %f GFlop/s\n", n_itr* 2*(N-2)*(N-2)*4/tt*1e-9); get_u_ref(u_ref, u_ref1, N); cudaFreeHost(u_ref1); /* Allocate GPU memory */ double *u_d, *u_tmp, *f_d; cudaMalloc(&u_d, N * N * sizeof(double)); cudaMalloc(&u_tmp, N * N * sizeof(double)); cudaMalloc(&f_d, N * N * sizeof(double)); /* Copy data to GPU */ cudaMemcpy(u_d, u, N * N * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(u_tmp, u, N * N * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(f_d, f, N * N * sizeof(double), cudaMemcpyHostToDevice); double h2 = 1.0 / ((N + 1) * (N + 1)); cudaMemcpyToSymbol(&h2_d, &h2, sizeof(double)); dim3 blockDim(BLOCK_DIM, BLOCK_DIM); dim3 gridDim(N/(BLOCK_DIM-2)+1, N/(BLOCK_DIM-2)+1); 
jacobi_kernel<<<gridDim,blockDim>>>(u_d, u_tmp, f_d, N); /* Iteration on GPU */ cudaDeviceSynchronize(); t.tic(); for (long i = 0; i < n_itr; i++) { jacobi_kernel<<<gridDim,blockDim>>>(u_d, u_tmp, f_d, N); /* swap pointers */ double* p_tmp = u_d; u_d = u_tmp; u_tmp = p_tmp; } cudaDeviceSynchronize(); tt = t.toc(); printf("GPU time = %f s\n", tt); printf("GPU flops = %f GFlop/s\n", n_itr* 2*(N-2)*(N-2)*4/tt*1e-9); /* Copy data back to host */ cudaMemcpy(u, u_d, N * N * sizeof(double), cudaMemcpyDeviceToHost); /* Print error */ // double err = 0; // for (long i = 0; i < N*N; i++) err = std::max(err, fabs(u[i]-u_ref[i])); // printf("Error = %e\n", err); /* Free memory */ cudaFreeHost(u); cudaFreeHost(f); cudaFreeHost(u_ref); cudaFree(u_d); cudaFree(u_tmp); cudaFree(f_d); return 0; }
7ea67d253b57b139f7e01c54474793fc97b36e0e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void calcDetectObjectsForwardGPU(float *in, float *out, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;

  for( int i = 0; i < max_bounding_boxes; i=i+(4+max_classes)){
    int index = id * (in_size_x * in_size_y * in_size_z) + i;
    out[index  ] = 1.0f / (1.0f + exp( -in[index  ] )); // x: sigmoid
    out[index+1] = 1.0f / (1.0f + exp( -in[index+1] )); // y: sigmoid
    out[index+2] = exp( in[index+2] );                  // w: exp
    out[index+3] = exp( in[index+3] );                  // h: exp

    for( int c = 0; c < max_classes; ++c){
      int index2 = id * (in_size_x * in_size_y * in_size_z) + i+4+c;
      out[index2] = 1.0f / (1.0f + exp( -in[index2] )); // id: sigmoid
    }
  }

  /* original
  for(int b = 0; b < in.size.b; ++b ){
    for( int i = 0; i < _max_bounding_boxes; i=i+(4+_max_classes)){
      out( b, i  , 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i  , 0, 0 ) )); // x: sigmoid
      out( b, i+1, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+1, 0, 0 ) )); // y: sigmoid
      out( b, i+2, 0, 0 ) = exp( in( b, i+2, 0, 0 ) );                  // w: exp
      out( b, i+3, 0, 0 ) = exp( in( b, i+3, 0, 0 ) );                  // h: exp

      for( int c = 0; c < _max_classes; ++c){
        out( b, i+4+c, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+4+c , 0, 0 ) )); // id: sigmoid
      }
    }
  }
  */
}
7ea67d253b57b139f7e01c54474793fc97b36e0e.cu
#include "includes.h"

__global__ void calcDetectObjectsForwardGPU(float *in, float *out, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;

  for( int i = 0; i < max_bounding_boxes; i=i+(4+max_classes)){
    int index = id * (in_size_x * in_size_y * in_size_z) + i;
    out[index  ] = 1.0f / (1.0f + exp( -in[index  ] )); // x: sigmoid
    out[index+1] = 1.0f / (1.0f + exp( -in[index+1] )); // y: sigmoid
    out[index+2] = exp( in[index+2] );                  // w: exp
    out[index+3] = exp( in[index+3] );                  // h: exp

    for( int c = 0; c < max_classes; ++c){
      int index2 = id * (in_size_x * in_size_y * in_size_z) + i+4+c;
      out[index2] = 1.0f / (1.0f + exp( -in[index2] )); // id: sigmoid
    }
  }

  /* original
  for(int b = 0; b < in.size.b; ++b ){
    for( int i = 0; i < _max_bounding_boxes; i=i+(4+_max_classes)){
      out( b, i  , 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i  , 0, 0 ) )); // x: sigmoid
      out( b, i+1, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+1, 0, 0 ) )); // y: sigmoid
      out( b, i+2, 0, 0 ) = exp( in( b, i+2, 0, 0 ) );                  // w: exp
      out( b, i+3, 0, 0 ) = exp( in( b, i+3, 0, 0 ) );                  // h: exp

      for( int c = 0; c < _max_classes; ++c){
        out( b, i+4+c, 0, 0 ) = 1.0f / (1.0f + exp( -in( b, i+4+c , 0, 0 ) )); // id: sigmoid
      }
    }
  }
  */
}
894f8ce263e0edc92ad62d77c548cc5321e9573f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>

#define wbCheck(stmt) \
  do { \
    hipError_t err = stmt; \
    if (err != hipSuccess) { \
      wbLog(ERROR, "Failed to run stmt ", #stmt); \
      wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
      return -1; \
    } \
  } while (0)

#define TILE_WIDTH 16

// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns) {
  //@@ Insert code to implement matrix multiplication here
  //@@ You have to use shared memory for this MP
  __shared__ float s_A[TILE_WIDTH][TILE_WIDTH];
  __shared__ float s_B[TILE_WIDTH][TILE_WIDTH];

  int row = threadIdx.y + blockDim.y * blockIdx.y;
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int n = numAColumns; // or numBRows
  float sum = 0.0;

  for(int i=0;i<(n-1)/TILE_WIDTH+1;i++){
    // the row is within the boundaries and the thread within the tile is loading a value within the matrix
    if( row < numARows && (i*TILE_WIDTH+threadIdx.x) < numAColumns )
      s_A[threadIdx.y][threadIdx.x] = A[row*n + i*TILE_WIDTH + threadIdx.x];
    else
      s_A[threadIdx.y][threadIdx.x] = 0.0;

    //the same mental exercise has to be done, should this line be written? is this tile value ok?
    if( (i*TILE_WIDTH+threadIdx.y) < numBRows && col < numBColumns )
      s_B[threadIdx.y][threadIdx.x] = B[col + (i*TILE_WIDTH+threadIdx.y)*numCColumns];
    else
      s_B[threadIdx.y][threadIdx.x] = 0.0;

    __syncthreads();

    // Now, the product is partial, only calculated to what has been loaded from shared memory
    for(int pos = 0; pos < TILE_WIDTH; ++pos){
      sum += s_A[threadIdx.y][pos] * s_B[pos][threadIdx.x];
    }
    __syncthreads();
  }

  if( (row < numCRows) && (col < numCColumns) ){
    C[row*numCColumns + col] = sum;
  }
}

void loadBestLaunchKernelConfig(dim3* dimGrid, dim3* DimBlock){
  // Unused helper: queries the current device so a launch configuration
  // could be bounded by deviceProp.maxThreadsPerBlock.
  int dev = 0;
  hipGetDevice(&dev);
  hipDeviceProp_t deviceProp;
  hipGetDeviceProperties(&deviceProp, dev);
  (void)deviceProp.maxThreadsPerBlock;
  (void)dimGrid;
  (void)DimBlock;
}

int main(int argc, char **argv) {
  wbArg_t args;
  float *hostA; // The A matrix
  float *hostB; // The B matrix
  float *hostC; // The output C matrix
  float *deviceA;
  float *deviceB;
  float *deviceC;
  int numARows;    // number of rows in the matrix A
  int numAColumns; // number of columns in the matrix A
  int numBRows;    // number of rows in the matrix B
  int numBColumns; // number of columns in the matrix B
  int numCRows;    // number of rows in the matrix C (you have to set this)
  int numCColumns; // number of columns in the matrix C (you have to set
                   // this)

  args = wbArg_read(argc, argv);

  wbTime_start(Generic, "Importing data and creating memory on host");
  hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
  hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
  //@@ Set numCRows and numCColumns
  numCRows = numARows;
  numCColumns = numBColumns;
  //@@ Allocate the hostC matrix
  hostC = (float *)malloc(numCRows * numCColumns * sizeof(float));
  wbTime_stop(Generic, "Importing data and creating memory on host");

  wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
  wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);

  wbTime_start(GPU, "Allocating GPU memory.");
  //@@ Allocate GPU memory here
  wbCheck(hipMalloc((void**) &deviceA, numARows * numAColumns * sizeof(float)));
  wbCheck(hipMalloc((void**) &deviceB, numBRows * numBColumns * sizeof(float)));
  wbCheck(hipMalloc((void**) &deviceC, numCRows * numCColumns * sizeof(float)));
  wbTime_stop(GPU, "Allocating GPU memory.");

  wbTime_start(GPU, "Copying input memory to the GPU.");
  //@@ Copy memory to the GPU here
  hipMemcpy(deviceA, hostA , numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(deviceB, hostB , numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice);
  wbTime_stop(GPU, "Copying input memory to the GPU.");

  //@@ Initialize the grid and block dimensions here
  dim3 DimGrid((numCColumns-1)/TILE_WIDTH + 1, (numCRows-1)/TILE_WIDTH + 1, 1); //nro of blocks in grid (at least 1 blocks)
  dim3 DimBlock(TILE_WIDTH, TILE_WIDTH, 1); //nro of threads in blocks

  wbTime_start(Compute, "Performing CUDA computation");
  //@@ Launch the GPU Kernel here
  hipLaunchKernelGGL(( matrixMultiplyShared), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns );
  hipDeviceSynchronize();
  wbTime_stop(Compute, "Performing CUDA computation");

  wbTime_start(Copy, "Copying output memory to the CPU");
  //@@ Copy the GPU memory back to the CPU here
  hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float) , hipMemcpyDeviceToHost);
  wbTime_stop(Copy, "Copying output memory to the CPU");

  wbTime_start(GPU, "Freeing GPU Memory");
  //@@ Free the GPU memory here
  hipFree(deviceA);
  hipFree(deviceB);
  hipFree(deviceC);
  wbTime_stop(GPU, "Freeing GPU Memory");

  wbSolution(args, hostC, numCRows, numCColumns);

  free(hostA);
  free(hostB);
  free(hostC);

  return 0;
}
894f8ce263e0edc92ad62d77c548cc5321e9573f.cu
#include <wb.h>

#define wbCheck(stmt) \
  do { \
    cudaError_t err = stmt; \
    if (err != cudaSuccess) { \
      wbLog(ERROR, "Failed to run stmt ", #stmt); \
      wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
      return -1; \
    } \
  } while (0)

#define TILE_WIDTH 16

// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns) {
  //@@ Insert code to implement matrix multiplication here
  //@@ You have to use shared memory for this MP
  __shared__ float s_A[TILE_WIDTH][TILE_WIDTH];
  __shared__ float s_B[TILE_WIDTH][TILE_WIDTH];

  int row = threadIdx.y + blockDim.y * blockIdx.y;
  int col = threadIdx.x + blockDim.x * blockIdx.x;
  int n = numAColumns; // or numBRows
  float sum = 0.0;

  for(int i=0;i<(n-1)/TILE_WIDTH+1;i++){
    // the row is within the boundaries and the thread within the tile is loading a value within the matrix
    if( row < numARows && (i*TILE_WIDTH+threadIdx.x) < numAColumns )
      s_A[threadIdx.y][threadIdx.x] = A[row*n + i*TILE_WIDTH + threadIdx.x];
    else
      s_A[threadIdx.y][threadIdx.x] = 0.0;

    //the same mental exercise has to be done, should this line be written? is this tile value ok?
    if( (i*TILE_WIDTH+threadIdx.y) < numBRows && col < numBColumns )
      s_B[threadIdx.y][threadIdx.x] = B[col + (i*TILE_WIDTH+threadIdx.y)*numCColumns];
    else
      s_B[threadIdx.y][threadIdx.x] = 0.0;

    __syncthreads();

    // Now, the product is partial, only calculated to what has been loaded from shared memory
    for(int pos = 0; pos < TILE_WIDTH; ++pos){
      sum += s_A[threadIdx.y][pos] * s_B[pos][threadIdx.x];
    }
    __syncthreads();
  }

  if( (row < numCRows) && (col < numCColumns) ){
    C[row*numCColumns + col] = sum;
  }
}

void loadBestLaunchKernelConfig(dim3* dimGrid, dim3* DimBlock){
  // Unused helper: queries the current device so a launch configuration
  // could be bounded by deviceProp.maxThreadsPerBlock.
  int dev = 0;
  cudaGetDevice(&dev);
  cudaDeviceProp deviceProp;
  cudaGetDeviceProperties(&deviceProp, dev);
  (void)deviceProp.maxThreadsPerBlock;
  (void)dimGrid;
  (void)DimBlock;
}

int main(int argc, char **argv) {
  wbArg_t args;
  float *hostA; // The A matrix
  float *hostB; // The B matrix
  float *hostC; // The output C matrix
  float *deviceA;
  float *deviceB;
  float *deviceC;
  int numARows;    // number of rows in the matrix A
  int numAColumns; // number of columns in the matrix A
  int numBRows;    // number of rows in the matrix B
  int numBColumns; // number of columns in the matrix B
  int numCRows;    // number of rows in the matrix C (you have to set this)
  int numCColumns; // number of columns in the matrix C (you have to set
                   // this)

  args = wbArg_read(argc, argv);

  wbTime_start(Generic, "Importing data and creating memory on host");
  hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
  hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
  //@@ Set numCRows and numCColumns
  numCRows = numARows;
  numCColumns = numBColumns;
  //@@ Allocate the hostC matrix
  hostC = (float *)malloc(numCRows * numCColumns * sizeof(float));
  wbTime_stop(Generic, "Importing data and creating memory on host");

  wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
  wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);

  wbTime_start(GPU, "Allocating GPU memory.");
  //@@ Allocate GPU memory here
  wbCheck(cudaMalloc((void**) &deviceA, numARows * numAColumns * sizeof(float)));
  wbCheck(cudaMalloc((void**) &deviceB, numBRows * numBColumns * sizeof(float)));
  wbCheck(cudaMalloc((void**) &deviceC, numCRows * numCColumns * sizeof(float)));
  wbTime_stop(GPU, "Allocating GPU memory.");

  wbTime_start(GPU, "Copying input memory to the GPU.");
  //@@ Copy memory to the GPU here
  cudaMemcpy(deviceA, hostA , numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(deviceB, hostB , numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice);
  wbTime_stop(GPU, "Copying input memory to the GPU.");

  //@@ Initialize the grid and block dimensions here
  dim3 DimGrid((numCColumns-1)/TILE_WIDTH + 1, (numCRows-1)/TILE_WIDTH + 1, 1); //nro of blocks in grid (at least 1 blocks)
  dim3 DimBlock(TILE_WIDTH, TILE_WIDTH, 1); //nro of threads in blocks

  wbTime_start(Compute, "Performing CUDA computation");
  //@@ Launch the GPU Kernel here
  matrixMultiplyShared<<<DimGrid,DimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns );
  cudaDeviceSynchronize();
  wbTime_stop(Compute, "Performing CUDA computation");

  wbTime_start(Copy, "Copying output memory to the CPU");
  //@@ Copy the GPU memory back to the CPU here
  cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float) , cudaMemcpyDeviceToHost);
  wbTime_stop(Copy, "Copying output memory to the CPU");

  wbTime_start(GPU, "Freeing GPU Memory");
  //@@ Free the GPU memory here
  cudaFree(deviceA);
  cudaFree(deviceB);
  cudaFree(deviceC);
  wbTime_stop(GPU, "Freeing GPU Memory");

  wbSolution(args, hostC, numCRows, numCColumns);

  free(hostA);
  free(hostB);
  free(hostC);

  return 0;
}
2cef17486d3e816ae288ab8766a3a96bbef9f336.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

typedef struct {
  double x;
  double y;
  double z;
  double w;
  double xx;
  double yy;
  double zz;
  double ww;
} double8;

// Kernel function to add the elements of two arrays
__global__ void cpy_double8(int n, double8 *src, double8 *dst)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride){
    dst[i].x = src[i].x + 1;
    dst[i].y = src[i].y + 1;
    dst[i].z = src[i].z + 1;
    dst[i].w = src[i].w + 1;
    dst[i].xx = src[i].xx + 1;
    dst[i].yy = src[i].yy + 1;
    dst[i].zz = src[i].zz + 1;
    dst[i].ww = src[i].ww + 1;
  }
}

__global__ void fill_double8(int n, double8 *dst)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride){
    dst[i].x = 0;
    dst[i].y = 0;
    dst[i].z = 0;
    dst[i].w = 0;
    dst[i].xx = 0;
    dst[i].yy = 0;
    dst[i].zz = 0;
    dst[i].ww = 0;
  }
}

int main(void)
{
  int N = 1 << 20;
  double8 *x, *y, *z;

  // Allocate Unified Memory – accessible from CPU or GPU
  hipMallocManaged(&x, N * sizeof(double8));
  hipMallocManaged(&y, N * sizeof(double8));
  hipMallocManaged(&z, N * sizeof(double8));

  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;

  /*
  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  */
  //fill_double8<<<numBlocks, blockSize>>>(N, x);

  /*
  // Run kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  add<<<numBlocks, blockSize>>>(N, x, y);
  */
  hipLaunchKernelGGL(( cpy_double8), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);

  // Wait for GPU to finish before accessing on host
  hipDeviceSynchronize();

  // Copying to host memory
  hipMemcpy(z, y, N * sizeof(double8), hipMemcpyDeviceToHost);

  double maxError = 0.0;
  for (int i = 0; i < N; i++){
    maxError = fmax(maxError, fabs(z[i].x - 1.0));
    maxError = fmax(maxError, fabs(z[i].y - 1.0));
    maxError = fmax(maxError, fabs(z[i].z - 1.0));
    maxError = fmax(maxError, fabs(z[i].w - 1.0));
    maxError = fmax(maxError, fabs(z[i].xx - 1.0));
    maxError = fmax(maxError, fabs(z[i].yy - 1.0));
    maxError = fmax(maxError, fabs(z[i].zz - 1.0));
    maxError = fmax(maxError, fabs(z[i].ww - 1.0));
  }
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory (z was allocated with hipMallocManaged, so it must be released with hipFree)
  hipFree(x);
  hipFree(y);
  hipFree(z);

  return 0;
}
2cef17486d3e816ae288ab8766a3a96bbef9f336.cu
#include <iostream>
#include <math.h>

typedef struct {
  double x;
  double y;
  double z;
  double w;
  double xx;
  double yy;
  double zz;
  double ww;
} double8;

// Kernel function to add the elements of two arrays
__global__ void cpy_double8(int n, double8 *src, double8 *dst)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride){
    dst[i].x = src[i].x + 1;
    dst[i].y = src[i].y + 1;
    dst[i].z = src[i].z + 1;
    dst[i].w = src[i].w + 1;
    dst[i].xx = src[i].xx + 1;
    dst[i].yy = src[i].yy + 1;
    dst[i].zz = src[i].zz + 1;
    dst[i].ww = src[i].ww + 1;
  }
}

__global__ void fill_double8(int n, double8 *dst)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride){
    dst[i].x = 0;
    dst[i].y = 0;
    dst[i].z = 0;
    dst[i].w = 0;
    dst[i].xx = 0;
    dst[i].yy = 0;
    dst[i].zz = 0;
    dst[i].ww = 0;
  }
}

int main(void)
{
  int N = 1 << 20;
  double8 *x, *y, *z;

  // Allocate Unified Memory – accessible from CPU or GPU
  cudaMallocManaged(&x, N * sizeof(double8));
  cudaMallocManaged(&y, N * sizeof(double8));
  cudaMallocManaged(&z, N * sizeof(double8));

  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;

  /*
  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  */
  //fill_double8<<<numBlocks, blockSize>>>(N, x);

  /*
  // Run kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  add<<<numBlocks, blockSize>>>(N, x, y);
  */
  cpy_double8<<<numBlocks, blockSize>>>(N, x, y);

  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();

  // Copying to host memory
  cudaMemcpy(z, y, N * sizeof(double8), cudaMemcpyDeviceToHost);

  double maxError = 0.0;
  for (int i = 0; i < N; i++){
    maxError = fmax(maxError, fabs(z[i].x - 1.0));
    maxError = fmax(maxError, fabs(z[i].y - 1.0));
    maxError = fmax(maxError, fabs(z[i].z - 1.0));
    maxError = fmax(maxError, fabs(z[i].w - 1.0));
    maxError = fmax(maxError, fabs(z[i].xx - 1.0));
    maxError = fmax(maxError, fabs(z[i].yy - 1.0));
    maxError = fmax(maxError, fabs(z[i].zz - 1.0));
    maxError = fmax(maxError, fabs(z[i].ww - 1.0));
  }
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory (z was allocated with cudaMallocManaged, so it must be released with cudaFree)
  cudaFree(x);
  cudaFree(y);
  cudaFree(z);

  return 0;
}
535f6344221495a75c6369199b50f14fc0411362.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void Add(int *a, int *b, int *c)
{
  int tid = threadIdx.x;
  if (tid < N)
    c[tid] = a[tid] + b[tid];
}
535f6344221495a75c6369199b50f14fc0411362.cu
#include "includes.h"

__global__ void Add(int *a, int *b, int *c)
{
  int tid = threadIdx.x;
  if (tid < N)
    c[tid] = a[tid] + b[tid];
}
541ff9084bc7b88e1e11acdc4f3331bbaee263e6.hip
// !!! This is a file automatically generated by hipify!!!
#include "device_global_memory.h"
#include <math.h>
#include <hip/hip_runtime.h>

//vptree.cu global variables
double* d_points;
double* d_pointsAux;
unsigned int* d_indexes;
unsigned int* d_indexesAux;
unsigned int* d_vpSwaps;
double* d_treeMDs;
unsigned int* d_treeIDXs;
unsigned int* d_nodesOffset;
unsigned int* d_nodesLength;

//distances.cu global variables
double* d_distances;

//quick_select.cu global variables
double* d_qsAux;
unsigned int* d_f;
unsigned int* d_t;
unsigned int* d_addr;
unsigned int* d_NFs;
char* d_e;

//knn_search.cu global variables
double* d_qpoints;
double* d_ndist;
unsigned int* d_nidx;
unsigned int* d_offsetsStack;
unsigned int* d_lengthsStack;
double* d_parentNDistStack;
double* d_parentMdStack;
char* d_isInnerStack;

/* Returns the smallest power of two that is >= n */
static unsigned int smallest_power_two(unsigned int n) {
    unsigned int N = n;
    if ((N & (N - 1)) != 0) { // fix if n is not power of 2
        N = 1;
        while (N < n) N <<= 1;
    }
    return N;
}

/* Functions to initialize memory */
int qs_memory_allocate(unsigned int numberOfPoints, unsigned int maxParallelNodes) {
    hipError_t err;
    unsigned int fixedNoP = smallest_power_two(numberOfPoints + 1); //quick select needs length in powers of two

    //quick_select.cu global variables
    err = hipMalloc(&d_qsAux, fixedNoP * sizeof(double));             if (err != hipSuccess) return err;
    err = hipMalloc(&d_f, fixedNoP * sizeof(unsigned int));           if (err != hipSuccess) return err;
    err = hipMalloc(&d_t, fixedNoP * sizeof(unsigned int));           if (err != hipSuccess) return err;
    err = hipMalloc(&d_addr, fixedNoP * sizeof(unsigned int));        if (err != hipSuccess) return err;
    err = hipMalloc(&d_NFs, maxParallelNodes * sizeof(unsigned int)); if (err != hipSuccess) return err;
    err = hipMalloc(&d_e, fixedNoP * sizeof(char));                   if (err != hipSuccess) return err;

    return hipSuccess;
}

int di_memory_allocate(unsigned int numberOfPoints) {
    hipError_t err;
    unsigned int fixedNoP = smallest_power_two(numberOfPoints + 1); //quick select needs length in powers of two

    //distances.cu global variables
    err = hipMalloc(&d_distances, fixedNoP * sizeof(double)); if (err != hipSuccess) return err;

    return hipSuccess;
}

int vp_memory_allocate(unsigned int numberOfPoints, unsigned int dimensionOfPoints) {
    hipError_t err;
    unsigned int fixedNoP = smallest_power_two(numberOfPoints + 1);     //quick select needs length in powers of two
    unsigned int maxNodes = smallest_power_two(numberOfPoints + 1) / 2; //max nodes on the last level of the tree

    //vptree.cu global variables
    err = hipMalloc(&d_points, (numberOfPoints * dimensionOfPoints) * sizeof(double));    if (err != hipSuccess) return err;
    err = hipMalloc(&d_pointsAux, (numberOfPoints * dimensionOfPoints) * sizeof(double)); if (err != hipSuccess) return err;
    err = hipMalloc(&d_indexes, numberOfPoints * sizeof(unsigned int));                   if (err != hipSuccess) return err;
    err = hipMalloc(&d_indexesAux, numberOfPoints * sizeof(unsigned int));                if (err != hipSuccess) return err;
    err = hipMalloc(&d_vpSwaps, fixedNoP * sizeof(unsigned int));                         if (err != hipSuccess) return err;
    err = hipMalloc(&d_treeMDs, numberOfPoints * sizeof(double));                         if (err != hipSuccess) return err;
    err = hipMalloc(&d_treeIDXs, numberOfPoints * sizeof(unsigned int));                  if (err != hipSuccess) return err;
    err = hipMalloc(&d_nodesOffset, maxNodes * sizeof(unsigned int));                     if (err != hipSuccess) return err;
    err = hipMalloc(&d_nodesLength, maxNodes * sizeof(unsigned int));                     if (err != hipSuccess) return err;

    return hipSuccess;
}

int knn_memory_allocate(unsigned int n, unsigned int m, unsigned int d, unsigned int k) {
    hipError_t err;
    unsigned int maxDepth = (unsigned int)log2f(n) + 1;

    //knn_search.cu global variables
    err = hipMalloc(&d_points, (n * d) * sizeof(double));                    if (err != hipSuccess) return err;
    err = hipMalloc(&d_qpoints, (m * d) * sizeof(double));                   if (err != hipSuccess) return err;
    err = hipMalloc(&d_ndist, (m * k) * sizeof(double));                     if (err != hipSuccess) return err;
    err = hipMalloc(&d_nidx, (m * k) * sizeof(unsigned int));                if (err != hipSuccess) return err;
    err = hipMalloc(&d_treeMDs, n * sizeof(double));                         if (err != hipSuccess) return err;
    err = hipMalloc(&d_treeIDXs, n * sizeof(unsigned int));                  if (err != hipSuccess) return err;
    err = hipMalloc(&d_offsetsStack, (m * maxDepth) * sizeof(unsigned int)); if (err != hipSuccess) return err;
    err = hipMalloc(&d_lengthsStack, (m * maxDepth) * sizeof(unsigned int)); if (err != hipSuccess) return err;
    err = hipMalloc(&d_parentNDistStack, (m * maxDepth) * sizeof(double));   if (err != hipSuccess) return err;
    err = hipMalloc(&d_parentMdStack, (m * maxDepth) * sizeof(double));      if (err != hipSuccess) return err;
    err = hipMalloc(&d_isInnerStack, (m * maxDepth) * sizeof(char));         if (err != hipSuccess) return err;

    return hipSuccess;
}

/* Functions to free memory */
void qs_memory_deallocate() {
    hipFree(d_qsAux);
    hipFree(d_f);
    hipFree(d_t);
    hipFree(d_addr);
    hipFree(d_NFs); // d_NFs is allocated in qs_memory_allocate, so it must be freed here as well
    hipFree(d_e);
}

void di_memory_deallocate() {
    hipFree(d_distances);
}

void vp_memory_deallocate() {
    hipFree(d_points);
    hipFree(d_pointsAux);
    hipFree(d_indexes);
    hipFree(d_indexesAux);
    hipFree(d_vpSwaps);
    hipFree(d_treeMDs);
    hipFree(d_treeIDXs);
    hipFree(d_nodesOffset);
    hipFree(d_nodesLength);
}

void knn_deallocate() {
    hipFree(d_points);
    hipFree(d_qpoints);
    hipFree(d_ndist);
    hipFree(d_nidx);
    hipFree(d_treeMDs);
    hipFree(d_treeIDXs);
    hipFree(d_offsetsStack);
    hipFree(d_lengthsStack);
    hipFree(d_parentNDistStack);
    hipFree(d_parentMdStack);
    hipFree(d_isInnerStack);
}
541ff9084bc7b88e1e11acdc4f3331bbaee263e6.cu
#include "device_global_memory.h"
#include <math.h>
#include <cuda_runtime.h>

//vptree.cu global variables
double* d_points;
double* d_pointsAux;
unsigned int* d_indexes;
unsigned int* d_indexesAux;
unsigned int* d_vpSwaps;
double* d_treeMDs;
unsigned int* d_treeIDXs;
unsigned int* d_nodesOffset;
unsigned int* d_nodesLength;

//distances.cu global variables
double* d_distances;

//quick_select.cu global variables
double* d_qsAux;
unsigned int* d_f;
unsigned int* d_t;
unsigned int* d_addr;
unsigned int* d_NFs;
char* d_e;

//knn_search.cu global variables
double* d_qpoints;
double* d_ndist;
unsigned int* d_nidx;
unsigned int* d_offsetsStack;
unsigned int* d_lengthsStack;
double* d_parentNDistStack;
double* d_parentMdStack;
char* d_isInnerStack;

/* Returns the smallest power of two that is >= n */
static unsigned int smallest_power_two(unsigned int n) {
    unsigned int N = n;
    if ((N & (N - 1)) != 0) { // fix if n is not power of 2
        N = 1;
        while (N < n) N <<= 1;
    }
    return N;
}

/* Functions to initialize memory */
int qs_memory_allocate(unsigned int numberOfPoints, unsigned int maxParallelNodes) {
    cudaError err;
    unsigned int fixedNoP = smallest_power_two(numberOfPoints + 1); //quick select needs length in powers of two

    //quick_select.cu global variables
    err = cudaMalloc(&d_qsAux, fixedNoP * sizeof(double));             if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_f, fixedNoP * sizeof(unsigned int));           if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_t, fixedNoP * sizeof(unsigned int));           if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_addr, fixedNoP * sizeof(unsigned int));        if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_NFs, maxParallelNodes * sizeof(unsigned int)); if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_e, fixedNoP * sizeof(char));                   if (err != cudaSuccess) return err;

    return cudaSuccess;
}

int di_memory_allocate(unsigned int numberOfPoints) {
    cudaError err;
    unsigned int fixedNoP = smallest_power_two(numberOfPoints + 1); //quick select needs length in powers of two

    //distances.cu global variables
    err = cudaMalloc(&d_distances, fixedNoP * sizeof(double)); if (err != cudaSuccess) return err;

    return cudaSuccess;
}

int vp_memory_allocate(unsigned int numberOfPoints, unsigned int dimensionOfPoints) {
    cudaError err;
    unsigned int fixedNoP = smallest_power_two(numberOfPoints + 1);     //quick select needs length in powers of two
    unsigned int maxNodes = smallest_power_two(numberOfPoints + 1) / 2; //max nodes on the last level of the tree

    //vptree.cu global variables
    err = cudaMalloc(&d_points, (numberOfPoints * dimensionOfPoints) * sizeof(double));    if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_pointsAux, (numberOfPoints * dimensionOfPoints) * sizeof(double)); if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_indexes, numberOfPoints * sizeof(unsigned int));                   if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_indexesAux, numberOfPoints * sizeof(unsigned int));                if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_vpSwaps, fixedNoP * sizeof(unsigned int));                         if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_treeMDs, numberOfPoints * sizeof(double));                         if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_treeIDXs, numberOfPoints * sizeof(unsigned int));                  if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_nodesOffset, maxNodes * sizeof(unsigned int));                     if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_nodesLength, maxNodes * sizeof(unsigned int));                     if (err != cudaSuccess) return err;

    return cudaSuccess;
}

int knn_memory_allocate(unsigned int n, unsigned int m, unsigned int d, unsigned int k) {
    cudaError err;
    unsigned int maxDepth = (unsigned int)log2f(n) + 1;

    //knn_search.cu global variables
    err = cudaMalloc(&d_points, (n * d) * sizeof(double));                    if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_qpoints, (m * d) * sizeof(double));                   if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_ndist, (m * k) * sizeof(double));                     if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_nidx, (m * k) * sizeof(unsigned int));                if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_treeMDs, n * sizeof(double));                         if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_treeIDXs, n * sizeof(unsigned int));                  if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_offsetsStack, (m * maxDepth) * sizeof(unsigned int)); if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_lengthsStack, (m * maxDepth) * sizeof(unsigned int)); if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_parentNDistStack, (m * maxDepth) * sizeof(double));   if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_parentMdStack, (m * maxDepth) * sizeof(double));      if (err != cudaSuccess) return err;
    err = cudaMalloc(&d_isInnerStack, (m * maxDepth) * sizeof(char));         if (err != cudaSuccess) return err;

    return cudaSuccess;
}

/* Functions to free memory */
void qs_memory_deallocate() {
    cudaFree(d_qsAux);
    cudaFree(d_f);
    cudaFree(d_t);
    cudaFree(d_addr);
    cudaFree(d_NFs); // d_NFs is allocated in qs_memory_allocate, so it must be freed here as well
    cudaFree(d_e);
}

void di_memory_deallocate() {
    cudaFree(d_distances);
}

void vp_memory_deallocate() {
    cudaFree(d_points);
    cudaFree(d_pointsAux);
    cudaFree(d_indexes);
    cudaFree(d_indexesAux);
    cudaFree(d_vpSwaps);
    cudaFree(d_treeMDs);
    cudaFree(d_treeIDXs);
    cudaFree(d_nodesOffset);
    cudaFree(d_nodesLength);
}

void knn_deallocate() {
    cudaFree(d_points);
    cudaFree(d_qpoints);
    cudaFree(d_ndist);
    cudaFree(d_nidx);
    cudaFree(d_treeMDs);
    cudaFree(d_treeIDXs);
    cudaFree(d_offsetsStack);
    cudaFree(d_lengthsStack);
    cudaFree(d_parentNDistStack);
    cudaFree(d_parentMdStack);
    cudaFree(d_isInnerStack);
}
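/*
 * Usage sketch for the allocation helpers defined above: each *_allocate call
 * returns cudaSuccess or the raw cudaError_t code of the failing cudaMalloc,
 * and every *_allocate is expected to be paired with its *_deallocate
 * counterpart. The caller below (build_tree_buffers) is a hypothetical
 * illustration, not part of the original sources; it assumes the helpers are
 * declared in device_global_memory.h as included above.
 */
#include <stdio.h>
#include <cuda_runtime.h>
#include "device_global_memory.h"

int build_tree_buffers(unsigned int numberOfPoints, unsigned int dimensionOfPoints)
{
    int err = vp_memory_allocate(numberOfPoints, dimensionOfPoints);
    if (err != cudaSuccess) {
        fprintf(stderr, "vp_memory_allocate failed: %s\n",
                cudaGetErrorString((cudaError_t)err));
        return err;
    }

    /* ... build the vantage-point tree here using d_points, d_indexes, d_treeMDs, ... */

    vp_memory_deallocate();
    return cudaSuccess;
}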
d0b988a02cb472cb8fe74572a2f05febe2586099.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Aurora Renderer
 * Copyright (c) 2013 Michal Siejak
 * Licensed under MIT open-source license, see COPYING.txt file for details.
 */

#include <stdafx.h>
#include <kernels/kernels.h>
using namespace Aurora;

#include <kernels/lib/common.cuh>
#include <kernels/lib/intersect.cuh>
#include <kernels/lib/bsdf.cuh>
#include <kernels/lib/shader.cuh>
#include <kernels/lib/light.cuh>
#include <kernels/lib/radiance.cuh>
#include <kernels/lib/stack.cuh>
#include <kernels/lib/texunit.cuh>

#define MAX_DEPTH 8
typedef Stack<Ray, MAX_DEPTH+2> RayStack;

inline __device__ void raytrace(RNG* rng, const Geometry& geometry, const Shader* shaders,
    const unsigned int numLights, const Light* lights,
    RayStack& rs, HitPoint& hp, int& depth)
{
    Ray ray = rs.pop();
    if(!intersect(geometry, ray, hp))
        return;

    hp.position = ray.point();
    hp.wo = -ray.dir;

    const Shader& shader = shaders[getSafeID(geometry.shaders[hp.triangleID])];
    BSDF bsdf = shader.getBSDF(geometry, hp.triangleID, hp.u, hp.v);
    if(shader.texture > 0) {
        const float2 uv = getTexCoord(geometry, hp.triangleID, hp.u, hp.v);
        bsdf.color1 = texfetch(getID(shader.texture), uv.x, uv.y) * shader.diffuse;
    }

    float3 Li = shader.emissionColor;
    for(int i=0; i<numLights; i++) {
        const Light& light = lights[i];

        if(light.isAmbientLight())
            Li = Li + estimateAmbientRadiance(light, bsdf);
        if(light.isDeltaLight())
            Li = Li + estimateDirectRadianceDelta(geometry, light, shader, bsdf, hp);
        else
            Li = Li + estimateDirectRadiance(rng, geometry, light, shader, bsdf, hp);
    }

    if(shader.reflectivity > 0.0f || shader.translucence > 0.0f)
        depth++;

    if(depth <= MAX_DEPTH && shader.reflectivity > 0.0f) {
        Ray newray(hp.position, reflect(ray.dir, bsdf.N));
        newray.weight = shader.reflectivity;
        newray.eta = ray.eta;
        ray.weight *= (1.0f - shader.reflectivity);
        rs.push(newray);
    }
    if(depth <= MAX_DEPTH && shader.translucence > 0.0f) {
        float3 rN = bsdf.N;
        float etai = ray.eta;
        float etat = shader.refractiveIndex;
        if(dot(rN, hp.wo) < 0.0f) {
            rN = -rN;
            etai = shader.refractiveIndex;
            etat = 1.0f;
        }

        float3 tdir;
        if(refract(tdir, ray.dir, rN, etai, etat)) {
            Ray newray(hp.position, tdir);
            newray.weight = shader.translucence;
            newray.eta = etat;
            ray.weight *= (1.0f - shader.translucence);
            rs.push(newray);
        }
    }

    hp.color = hp.color + (Li * ray.weight);
}

__global__ static void cudaRaytraceKernel(const Geometry geometry, const Shader* shaders,
    const unsigned int numLights, const Light* lights, RNG* grng,
    const unsigned int offset, const unsigned int numRays, Ray* rays, HitPoint* hits)
{
    const unsigned int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    if(threadId >= numRays)
        return;

    const unsigned int rayId = offset + threadId;
    RNG rng = grng[rayId];

    Ray& ray = rays[rayId];
    HitPoint& hp = hits[rayId];

    RayStack rs;
    rs.push(ray);

    int depth=0;
    while(depth <= MAX_DEPTH && rs.size > 0)
        raytrace(&rng, geometry, shaders, numLights, lights, rs, hp, depth);

    grng[rayId] = rng;
}

__host__ void cudaRaytraceMonteCarlo(const Geometry& geometry, const ShadersArray& shaders,
    const TexturesArray& textures, const LightsArray& lights, RNG* rng,
    const unsigned int offset, const unsigned int numRays, Ray* rays, HitPoint* hits)
{
    dim3 blockSize(64);
    dim3 gridSize = make_grid(blockSize, dim3(numRays));

    bindTextures(textures);
    hipLaunchKernelGGL(( cudaRaytraceKernel), dim3(gridSize), dim3(blockSize), 0, 0, geometry, shaders.items, lights.size, lights.items,
        rng, offset, numRays, rays, hits);
}
d0b988a02cb472cb8fe74572a2f05febe2586099.cu
/* Aurora Renderer
 * Copyright (c) 2013 Michal Siejak
 * Licensed under MIT open-source license, see COPYING.txt file for details.
 */

#include <stdafx.h>
#include <kernels/kernels.h>
using namespace Aurora;

#include <kernels/lib/common.cuh>
#include <kernels/lib/intersect.cuh>
#include <kernels/lib/bsdf.cuh>
#include <kernels/lib/shader.cuh>
#include <kernels/lib/light.cuh>
#include <kernels/lib/radiance.cuh>
#include <kernels/lib/stack.cuh>
#include <kernels/lib/texunit.cuh>

#define MAX_DEPTH 8
typedef Stack<Ray, MAX_DEPTH+2> RayStack;

inline __device__ void raytrace(RNG* rng, const Geometry& geometry, const Shader* shaders,
    const unsigned int numLights, const Light* lights,
    RayStack& rs, HitPoint& hp, int& depth)
{
    Ray ray = rs.pop();
    if(!intersect(geometry, ray, hp))
        return;

    hp.position = ray.point();
    hp.wo = -ray.dir;

    const Shader& shader = shaders[getSafeID(geometry.shaders[hp.triangleID])];
    BSDF bsdf = shader.getBSDF(geometry, hp.triangleID, hp.u, hp.v);
    if(shader.texture > 0) {
        const float2 uv = getTexCoord(geometry, hp.triangleID, hp.u, hp.v);
        bsdf.color1 = texfetch(getID(shader.texture), uv.x, uv.y) * shader.diffuse;
    }

    float3 Li = shader.emissionColor;
    for(int i=0; i<numLights; i++) {
        const Light& light = lights[i];

        if(light.isAmbientLight())
            Li = Li + estimateAmbientRadiance(light, bsdf);
        if(light.isDeltaLight())
            Li = Li + estimateDirectRadianceDelta(geometry, light, shader, bsdf, hp);
        else
            Li = Li + estimateDirectRadiance(rng, geometry, light, shader, bsdf, hp);
    }

    if(shader.reflectivity > 0.0f || shader.translucence > 0.0f)
        depth++;

    if(depth <= MAX_DEPTH && shader.reflectivity > 0.0f) {
        Ray newray(hp.position, reflect(ray.dir, bsdf.N));
        newray.weight = shader.reflectivity;
        newray.eta = ray.eta;
        ray.weight *= (1.0f - shader.reflectivity);
        rs.push(newray);
    }
    if(depth <= MAX_DEPTH && shader.translucence > 0.0f) {
        float3 rN = bsdf.N;
        float etai = ray.eta;
        float etat = shader.refractiveIndex;
        if(dot(rN, hp.wo) < 0.0f) {
            rN = -rN;
            etai = shader.refractiveIndex;
            etat = 1.0f;
        }

        float3 tdir;
        if(refract(tdir, ray.dir, rN, etai, etat)) {
            Ray newray(hp.position, tdir);
            newray.weight = shader.translucence;
            newray.eta = etat;
            ray.weight *= (1.0f - shader.translucence);
            rs.push(newray);
        }
    }

    hp.color = hp.color + (Li * ray.weight);
}

__global__ static void cudaRaytraceKernel(const Geometry geometry, const Shader* shaders,
    const unsigned int numLights, const Light* lights, RNG* grng,
    const unsigned int offset, const unsigned int numRays, Ray* rays, HitPoint* hits)
{
    const unsigned int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    if(threadId >= numRays)
        return;

    const unsigned int rayId = offset + threadId;
    RNG rng = grng[rayId];

    Ray& ray = rays[rayId];
    HitPoint& hp = hits[rayId];

    RayStack rs;
    rs.push(ray);

    int depth=0;
    while(depth <= MAX_DEPTH && rs.size > 0)
        raytrace(&rng, geometry, shaders, numLights, lights, rs, hp, depth);

    grng[rayId] = rng;
}

__host__ void cudaRaytraceMonteCarlo(const Geometry& geometry, const ShadersArray& shaders,
    const TexturesArray& textures, const LightsArray& lights, RNG* rng,
    const unsigned int offset, const unsigned int numRays, Ray* rays, HitPoint* hits)
{
    dim3 blockSize(64);
    dim3 gridSize = make_grid(blockSize, dim3(numRays));

    bindTextures(textures);
    cudaRaytraceKernel<<<gridSize, blockSize>>>(geometry, shaders.items, lights.size, lights.items,
        rng, offset, numRays, rays, hits);
}
5b2cad62a8e6d3af7dafa7283aa7c7ec19c3c861.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 2015 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include "../debug.h"

__global__ void add(int *a, int *b, int *c)
{
  *c = *a + *b;
}

int main()
{
  int a, b, c;
  int *d_a, *d_b, *d_c;
  int size = sizeof( int );

/* get GPU device number and name */

  int dev;
  hipDeviceProp_t deviceProp;
  checkCUDA( hipGetDevice( &dev ) );
  checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
  printf("Using GPU %d: %s\n", dev, deviceProp.name );

/* allocate space for device copies of a, b, c */

  checkCUDA( hipMalloc( (void **) &d_a, size ) );
/* enter code here to malloc d_b and d_c */
  checkCUDA( hipMalloc( (void **) &d_b, size ) );
  checkCUDA( hipMalloc( (void **) &d_c, size ) );

/* setup initial values */

  a = 2;
  b = 7;
  c = -99;

/* copy inputs to device */

  checkCUDA( hipMemcpy( d_a, &a, size, hipMemcpyHostToDevice ) );
/* enter code here to copy d_b to device */
  checkCUDA( hipMemcpy( d_b, &b, size, hipMemcpyHostToDevice ) );

/* enter code here to launch the kernel on the GPU */
  hipLaunchKernelGGL( add, dim3(1), dim3(1), 0, 0, d_a, d_b, d_c );
  checkKERNEL()

/* copy result back to host */

  checkCUDA( hipMemcpy( &c, d_c, size, hipMemcpyDeviceToHost ) );

  printf("value of c after kernel is %d\n",c);

  if( c == ( a + b ) ) printf("PASS\n");
  else printf("FAIL\n");

/* clean up */

  checkCUDA( hipFree( d_a ) );
/* enter code here to hipFree the d_b and d_c pointers */
  checkCUDA( hipFree( d_b ) );
  checkCUDA( hipFree( d_c ) );

/* calling reset to check errors */

  checkCUDA( hipDeviceReset() );

  return 0;
} /* end main */
5b2cad62a8e6d3af7dafa7283aa7c7ec19c3c861.cu
/*
 * Copyright 2015 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include "../debug.h"

__global__ void add(int *a, int *b, int *c)
{
  *c = *a + *b;
}

int main()
{
  int a, b, c;
  int *d_a, *d_b, *d_c;
  int size = sizeof( int );

/* get GPU device number and name */

  int dev;
  cudaDeviceProp deviceProp;
  checkCUDA( cudaGetDevice( &dev ) );
  checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
  printf("Using GPU %d: %s\n", dev, deviceProp.name );

/* allocate space for device copies of a, b, c */

  checkCUDA( cudaMalloc( (void **) &d_a, size ) );
/* enter code here to malloc d_b and d_c */
  checkCUDA( cudaMalloc( (void **) &d_b, size ) );
  checkCUDA( cudaMalloc( (void **) &d_c, size ) );

/* setup initial values */

  a = 2;
  b = 7;
  c = -99;

/* copy inputs to device */

  checkCUDA( cudaMemcpy( d_a, &a, size, cudaMemcpyHostToDevice ) );
/* enter code here to copy d_b to device */
  checkCUDA( cudaMemcpy( d_b, &b, size, cudaMemcpyHostToDevice ) );

/* enter code here to launch the kernel on the GPU */
  add<<< 1, 1 >>>( d_a, d_b, d_c );
  checkKERNEL()

/* copy result back to host */

  checkCUDA( cudaMemcpy( &c, d_c, size, cudaMemcpyDeviceToHost ) );

  printf("value of c after kernel is %d\n",c);

  if( c == ( a + b ) ) printf("PASS\n");
  else printf("FAIL\n");

/* clean up */

  checkCUDA( cudaFree( d_a ) );
/* enter code here to cudaFree the d_b and d_c pointers */
  checkCUDA( cudaFree( d_b ) );
  checkCUDA( cudaFree( d_c ) );

/* calling reset to check errors */

  checkCUDA( cudaDeviceReset() );

  return 0;
} /* end main */
ae606f92632fd725fa31faaeb66b0e35db493d88.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> #define TOTAL_SHARED 25600 #define BLOCK_SIZE_WIDTH 16 #define EXTRA_SHARED_MEM_USAGE ((TOTAL_SHARED - BLOCK_SIZE_WIDTH*BLOCK_SIZE_WIDTH*8) / 4) // assuming all gpus are the same float occupancyCalc(const void* func, int block_size) { struct hipFuncAttributes attr; hipError_t error; error = hipFuncGetAttributes(&attr, func); if (error != hipSuccess) printf("hipFuncGetAttributes returned error code %d, line(%d)\n", error, __LINE__); hipSetDevice(0); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, 0); if (error != hipSuccess) printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); /* prepare some configuration variables */ int maxWarpsPerSM = deviceProp.maxThreadsPerMultiProcessor / deviceProp.warpSize; //printf("max warps per sm: %d\n", maxWarpsPerSM); /* variable starting by b means related to block size */ //printf("block_size: %d\n", block_size); int b_warpsPerBlock = block_size / deviceProp.warpSize + ((block_size % deviceProp.warpSize > 0) ? 1 : 0); //printf("warps per block: %d, block_size/deviceProp.warpSize: %d\n", b_warpsPerBlock, block_size/deviceProp.warpSize); int b_blocksPerSM = maxWarpsPerSM / b_warpsPerBlock; /* variable starting by r means related to register file */ int r_warpsPerBlock = b_warpsPerBlock; if (attr.numRegs > deviceProp.regsPerBlock) { printf("Register usage is larger than %d limit per block.", deviceProp.regsPerBlock); return 0; } int r_warpsPerSM = deviceProp.regsPerMultiprocessor / (deviceProp.warpSize * attr.numRegs); int r_blocksPerSM = r_warpsPerSM / r_warpsPerBlock; /* variable starting by s means related to shared memory */ int s_blocksPerSM = deviceProp.sharedMemPerMultiprocessor / attr.sharedSizeBytes; int numBlocks = (b_blocksPerSM < r_blocksPerSM) ? b_blocksPerSM : r_blocksPerSM; numBlocks = (numBlocks < s_blocksPerSM) ? 
numBlocks : s_blocksPerSM; //printf("b: %d, r: %d, s: %d\n", b_blocksPerSM, r_blocksPerSM, s_blocksPerSM); int numWarps = numBlocks * b_warpsPerBlock; printf("%d,%d,%d,%d,", attr.numRegs, (int) (attr.sharedSizeBytes), numBlocks, numWarps); float occupancy = (float)numWarps / maxWarpsPerSM; return occupancy; } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ #define BLOCK_SIZE 32 extern "C" __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; }
ae606f92632fd725fa31faaeb66b0e35db493d88.cu
/** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> #define TOTAL_SHARED 25600 #define BLOCK_SIZE_WIDTH 16 #define EXTRA_SHARED_MEM_USAGE ((TOTAL_SHARED - BLOCK_SIZE_WIDTH*BLOCK_SIZE_WIDTH*8) / 4) // assuming all gpus are the same float occupancyCalc(const void* func, int block_size) { struct cudaFuncAttributes attr; cudaError_t error; error = cudaFuncGetAttributes(&attr, func); if (error != cudaSuccess) printf("cudaFuncGetAttributes returned error code %d, line(%d)\n", error, __LINE__); cudaSetDevice(0); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, 0); if (error != cudaSuccess) printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); /* prepare some configuration variables */ int maxWarpsPerSM = deviceProp.maxThreadsPerMultiProcessor / deviceProp.warpSize; //printf("max warps per sm: %d\n", maxWarpsPerSM); /* variable starting by b means related to block size */ //printf("block_size: %d\n", block_size); int b_warpsPerBlock = block_size / deviceProp.warpSize + ((block_size % deviceProp.warpSize > 0) ? 1 : 0); //printf("warps per block: %d, block_size/deviceProp.warpSize: %d\n", b_warpsPerBlock, block_size/deviceProp.warpSize); int b_blocksPerSM = maxWarpsPerSM / b_warpsPerBlock; /* variable starting by r means related to register file */ int r_warpsPerBlock = b_warpsPerBlock; if (attr.numRegs > deviceProp.regsPerBlock) { printf("Register usage is larger than %d limit per block.", deviceProp.regsPerBlock); return 0; } int r_warpsPerSM = deviceProp.regsPerMultiprocessor / (deviceProp.warpSize * attr.numRegs); int r_blocksPerSM = r_warpsPerSM / r_warpsPerBlock; /* variable starting by s means related to shared memory */ int s_blocksPerSM = deviceProp.sharedMemPerMultiprocessor / attr.sharedSizeBytes; int numBlocks = (b_blocksPerSM < r_blocksPerSM) ? b_blocksPerSM : r_blocksPerSM; numBlocks = (numBlocks < s_blocksPerSM) ? 
numBlocks : s_blocksPerSM; //printf("b: %d, r: %d, s: %d\n", b_blocksPerSM, r_blocksPerSM, s_blocksPerSM); int numWarps = numBlocks * b_warpsPerBlock; printf("%d,%d,%d,%d,", attr.numRegs, (int) (attr.sharedSizeBytes), numBlocks, numWarps); float occupancy = (float)numWarps / maxWarpsPerSM; return occupancy; } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ #define BLOCK_SIZE 32 extern "C" __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; }
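/*
 * Host-side launch sketch for matrixMulCUDA above. Only the kernel and the
 * occupancyCalc helper are defined in this file; the launcher below
 * (launchMatrixMul) is a hypothetical illustration, assumed to be appended to
 * this translation unit. It further assumes wA, hA and wB are multiples of
 * BLOCK_SIZE and that d_A, d_B, d_C were allocated and filled elsewhere.
 */
void launchMatrixMul(float *d_C, float *d_A, float *d_B, int wA, int hA, int wB)
{
    // One thread per element of a BLOCK_SIZE x BLOCK_SIZE tile of C.
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(wB / BLOCK_SIZE, hA / BLOCK_SIZE);

    // Report the predicted occupancy for this block size (BLOCK_SIZE^2 threads per block).
    printf("predicted occupancy: %f\n",
           occupancyCalc((const void *)matrixMulCUDA, BLOCK_SIZE * BLOCK_SIZE));

    matrixMulCUDA<<<grid, threads>>>(d_C, d_A, d_B, wA, wB);
    cudaDeviceSynchronize();
}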
f2c83260e1a6bff8e4f44c8acc240fbd9bd69f56.hip
// !!! This is a file automatically generated by hipify!!! // Multiple GPU version of cuFFT_check that uses multiple GPU's // This program creates a real-valued 3D function sin(x)*cos(y)*cos(z) and then // takes the forward and inverse Fourier Transform, with the necessary scaling included. // The output of this process should match the input function // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <complex.h> // includes, project #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hipfft.h> #include <hip/hip_complex.h> //CUFFT Header file #include <hipfftXt.h> #define NX 512 #define NY 512 #define NZ 512 #define NZ2 (NZ/2+1) #define NN (NX*NY*NZ) #define L (2*M_PI) #define TX 8 #define TY 8 #define TZ 8 int divUp(int a, int b) { return (a + b - 1) / b; } __device__ int idxClip(int idx, int idxMax){ return idx > (idxMax - 1) ? (idxMax - 1) : (idx < 0 ? 0 : idx); } __device__ int flatten(int col, int row, int stack, int width, int height, int depth){ return idxClip(stack, depth) + idxClip(row, height)*depth + idxClip(col, width)*depth*height; // Note: using column-major indexing format } __global__ void initialize(int NX_per_GPU, int gpuNum, hipfftDoubleComplex *f1, hipfftDoubleComplex *f2) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int k = blockIdx.z * blockDim.z + threadIdx.z; if ((i >= NX_per_GPU) || (j >= NY) || (k >= NZ)) return; const int idx = flatten(i, j, k, NX, NY, NZ); // Create physical vectors in temporary memory double x = i * (double)L / NX + (double)gpuNum*NX_per_GPU*L / NX; double y = j * (double)L / NY; double z = k * (double)L / NZ; // Initialize starting array f1[idx].x = sin(x)*cos(y)*cos(z); f1[idx].y = 0.0; f2[idx].x = 0.0; f2[idx].y = 0.0; return; } __global__ void scaleResult(int NX_per_GPU, hipfftDoubleComplex *f) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int k = blockIdx.z * blockDim.z + threadIdx.z; if ((i >= NX_per_GPU) || (j >= NY) || (k >= NZ)) return; const int idx = flatten(i, j, k, NX, NY, NZ); f[idx].x = f[idx].x / ( (double)NN ); f[idx].y = f[idx].y / ( (double)NN ); return; } int main (void) { int i, j, k, idx, NX_per_GPU; // double complex test; // Set GPU's to use and list device properties int nGPUs = 2, deviceNum[nGPUs]; for(i = 0; i<nGPUs; ++i) { deviceNum[i] = i; hipSetDevice(deviceNum[i]); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, deviceNum[i]); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } printf("Running Multi_GPU_FFT_check using %d GPUs on a %dx%dx%d grid.\n",nGPUs,NX,NY,NZ); // Initialize input data // Split data according to number of GPUs NX_per_GPU = NX/nGPUs; // This is not a good solution long-term; needs more work for arbitrary grid sizes/nGPUs // Declare variables hipfftDoubleComplex *u; hipfftDoubleComplex *u_fft; // Allocate memory for arrays hipMallocManaged(&u, sizeof(hipfftDoubleComplex)*NN ); hipMallocManaged(&u_fft, sizeof(hipfftDoubleComplex)*NN ); // Launch CUDA kernel to initialize velocity field const dim3 blockSize(TX, TY, TZ); const dim3 gridSize(divUp(NX_per_GPU, TX), divUp(NY, TY), divUp(NZ, TZ)); for (i = 0; i<nGPUs; ++i){ hipSetDevice(deviceNum[i]); int idx = 
i*NX_per_GPU*NY*NZ; // sets the index value of the data to send to each gpu hipLaunchKernelGGL(( initialize), dim3(gridSize), dim3(blockSize), 0, 0, NX_per_GPU, deviceNum[i], &u[idx], &u_fft[idx]); } // Synchronize both GPUs before moving forward for (i = 0; i<nGPUs; ++i){ hipSetDevice(deviceNum[i]); hipDeviceSynchronize(); } // Initialize CUFFT for multiple GPUs // // Initialize result variable used for error checking hipfftResult result; // Create empty plan that will be used for the FFT hipfftHandle plan; result = hipfftCreate(&plan); if (result != HIPFFT_SUCCESS) { printf ("*Create failed\n"); return 1; } // Tell cuFFT which GPUs to use result = cufftXtSetGPUs (plan, nGPUs, deviceNum); if (result != HIPFFT_SUCCESS) { printf ("*XtSetGPUs failed: code %i\n", result); return 1; } // Create the plan for the FFT size_t *worksize; // Initializes the worksize variable worksize =(size_t*)malloc(sizeof(size_t) * nGPUs); // Allocates memory for the worksize variable, which tells cufft how many GPUs it has to work with // Create the plan for cufft result = hipfftMakePlan3d(plan, NX, NY, NZ, HIPFFT_Z2Z, worksize); if (result != HIPFFT_SUCCESS) { printf ("*MakePlan* failed: code %d \n",(int)result); exit (EXIT_FAILURE) ; } printf("The size of the worksize is %lu\n", worksize[0]); // Initialize transform array - to be split among GPU's and transformed in place using cufftX cudaLibXtDesc *u_prime; // Allocate data on multiple gpus using the cufft routines result = cufftXtMalloc(plan, (cudaLibXtDesc **)&u_prime, CUFFT_XT_FORMAT_INPLACE); if (result != HIPFFT_SUCCESS) { printf ("*XtMalloc failed\n"); exit (EXIT_FAILURE) ; } // Copy the data from 'host' to device using cufftXt formatting result = cufftXtMemcpy(plan, u_prime, u, CUFFT_COPY_HOST_TO_DEVICE); if (result != HIPFFT_SUCCESS) { printf ("*XtMemcpy failed, code: %d\n",result); exit (EXIT_FAILURE); } // Perform FFT on multiple GPUs printf("Forward 3d FFT on multiple GPUs\n"); result = cufftXtExecDescriptorZ2Z(plan, u_prime, u_prime, HIPFFT_FORWARD); if (result != HIPFFT_SUCCESS) { printf ("*XtExecZ2Z failed\n"); exit (EXIT_FAILURE); } ////////// Apparently re-ordering the data prior to the IFFT is not necessary (gives incorrect results)//////////////////// // cudaLibXtDesc *u_reorder; // result = cufftXtMalloc(plan, (cudaLibXtDesc **)&u_reorder, CUFFT_XT_FORMAT_INPLACE); // if (result != HIPFFT_SUCCESS) { printf ("*XtMalloc failed\n"); exit (EXIT_FAILURE) ; } // // Re-order data on multiple GPUs to natural order // printf("Reordering the data on the GPUs\n"); // result = cufftXtMemcpy (plan, u_reorder, u_prime, CUFFT_COPY_DEVICE_TO_DEVICE); // if (result != HIPFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); exit (EXIT_FAILURE); } ///////////////////////////////////////////////////////////////////////////////////////////// // Perform inverse FFT on multiple GPUs printf("Inverse 3d FFT on multiple GPUs\n"); result = cufftXtExecDescriptorZ2Z(plan, u_prime, u_prime, HIPFFT_BACKWARD); if (result != HIPFFT_SUCCESS) { printf ("*XtExecZ2Z failed\n"); exit (EXIT_FAILURE); } // Copy the output data from multiple gpus to the 'host' result variable (automatically reorders the data from output to natural order) result = cufftXtMemcpy (plan, u_fft, u_prime, CUFFT_COPY_DEVICE_TO_HOST); if (result != HIPFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); exit (EXIT_FAILURE); } // Scale output to match input (cuFFT does not automatically scale FFT output by 1/N) for (i = 0; i<nGPUs; ++i){ hipSetDevice(deviceNum[i]); idx = i*NX_per_GPU*NY*NZ; // sets the index value of the 
data to send to each gpu hipLaunchKernelGGL(( scaleResult), dim3(gridSize), dim3(blockSize), 0, 0, NX_per_GPU, &u_fft[idx]); } // Synchronize GPUs for (i = 0; i<nGPUs; ++i){ hipSetDevice(deviceNum[i]); hipDeviceSynchronize(); } // Test results to make sure that u = u_fft double error = 0.0; for (i = 0; i<NX; ++i){ for (j = 0; j<NY; ++j){ for (k = 0; k<NZ; ++k){ idx = k + j*NZ + NZ*NY*i; // error += (double)u[idx].x - sin(x)*cos(y)*cos(z); error += (double)u[idx].x - (double)u_fft[idx].x; // printf("At idx = %d, the value of the error is %f\n",idx,(double)u[idx].x - (double)u_fft[idx].x); // printf("At idx = %d, the value of the error is %f\n",idx,error); } } } printf("The sum of the error is %4.4g\n",error); // Deallocate variables // Free malloc'ed variables free(worksize); // Free cuda malloc'ed variables hipFree(u); hipFree(u_fft); // Free cufftX malloc'ed variables result = cufftXtFree(u_prime); if (result != HIPFFT_SUCCESS) { printf ("*XtFree failed\n"); exit (EXIT_FAILURE); } // result = cufftXtFree(u_reorder); // if (result != HIPFFT_SUCCESS) { printf ("*XtFree failed\n"); exit (EXIT_FAILURE); } // Destroy FFT plan result = hipfftDestroy(plan); if (result != HIPFFT_SUCCESS) { printf ("hipfftDestroy failed: code %d\n",(int)result); exit (EXIT_FAILURE); } return 0; }
f2c83260e1a6bff8e4f44c8acc240fbd9bd69f56.cu
// Multiple GPU version of cuFFT_check that uses multiple GPU's // This program creates a real-valued 3D function sin(x)*cos(y)*cos(z) and then // takes the forward and inverse Fourier Transform, with the necessary scaling included. // The output of this process should match the input function // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <complex.h> // includes, project #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cufft.h> #include <cuComplex.h> //CUFFT Header file #include <cufftXt.h> #define NX 512 #define NY 512 #define NZ 512 #define NZ2 (NZ/2+1) #define NN (NX*NY*NZ) #define L (2*M_PI) #define TX 8 #define TY 8 #define TZ 8 int divUp(int a, int b) { return (a + b - 1) / b; } __device__ int idxClip(int idx, int idxMax){ return idx > (idxMax - 1) ? (idxMax - 1) : (idx < 0 ? 0 : idx); } __device__ int flatten(int col, int row, int stack, int width, int height, int depth){ return idxClip(stack, depth) + idxClip(row, height)*depth + idxClip(col, width)*depth*height; // Note: using column-major indexing format } __global__ void initialize(int NX_per_GPU, int gpuNum, cufftDoubleComplex *f1, cufftDoubleComplex *f2) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int k = blockIdx.z * blockDim.z + threadIdx.z; if ((i >= NX_per_GPU) || (j >= NY) || (k >= NZ)) return; const int idx = flatten(i, j, k, NX, NY, NZ); // Create physical vectors in temporary memory double x = i * (double)L / NX + (double)gpuNum*NX_per_GPU*L / NX; double y = j * (double)L / NY; double z = k * (double)L / NZ; // Initialize starting array f1[idx].x = sin(x)*cos(y)*cos(z); f1[idx].y = 0.0; f2[idx].x = 0.0; f2[idx].y = 0.0; return; } __global__ void scaleResult(int NX_per_GPU, cufftDoubleComplex *f) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int k = blockIdx.z * blockDim.z + threadIdx.z; if ((i >= NX_per_GPU) || (j >= NY) || (k >= NZ)) return; const int idx = flatten(i, j, k, NX, NY, NZ); f[idx].x = f[idx].x / ( (double)NN ); f[idx].y = f[idx].y / ( (double)NN ); return; } int main (void) { int i, j, k, idx, NX_per_GPU; // double complex test; // Set GPU's to use and list device properties int nGPUs = 2, deviceNum[nGPUs]; for(i = 0; i<nGPUs; ++i) { deviceNum[i] = i; cudaSetDevice(deviceNum[i]); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, deviceNum[i]); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } printf("Running Multi_GPU_FFT_check using %d GPUs on a %dx%dx%d grid.\n",nGPUs,NX,NY,NZ); // Initialize input data // Split data according to number of GPUs NX_per_GPU = NX/nGPUs; // This is not a good solution long-term; needs more work for arbitrary grid sizes/nGPUs // Declare variables cufftDoubleComplex *u; cufftDoubleComplex *u_fft; // Allocate memory for arrays cudaMallocManaged(&u, sizeof(cufftDoubleComplex)*NN ); cudaMallocManaged(&u_fft, sizeof(cufftDoubleComplex)*NN ); // Launch CUDA kernel to initialize velocity field const dim3 blockSize(TX, TY, TZ); const dim3 gridSize(divUp(NX_per_GPU, TX), divUp(NY, TY), divUp(NZ, TZ)); for (i = 0; i<nGPUs; ++i){ cudaSetDevice(deviceNum[i]); int idx = i*NX_per_GPU*NY*NZ; // sets the index value of the data to send to each gpu 
initialize<<<gridSize, blockSize>>>(NX_per_GPU, deviceNum[i], &u[idx], &u_fft[idx]); } // Synchronize both GPUs before moving forward for (i = 0; i<nGPUs; ++i){ cudaSetDevice(deviceNum[i]); cudaDeviceSynchronize(); } // Initialize CUFFT for multiple GPUs // // Initialize result variable used for error checking cufftResult result; // Create empty plan that will be used for the FFT cufftHandle plan; result = cufftCreate(&plan); if (result != CUFFT_SUCCESS) { printf ("*Create failed\n"); return 1; } // Tell cuFFT which GPUs to use result = cufftXtSetGPUs (plan, nGPUs, deviceNum); if (result != CUFFT_SUCCESS) { printf ("*XtSetGPUs failed: code %i\n", result); return 1; } // Create the plan for the FFT size_t *worksize; // Initializes the worksize variable worksize =(size_t*)malloc(sizeof(size_t) * nGPUs); // Allocates memory for the worksize variable, which tells cufft how many GPUs it has to work with // Create the plan for cufft result = cufftMakePlan3d(plan, NX, NY, NZ, CUFFT_Z2Z, worksize); if (result != CUFFT_SUCCESS) { printf ("*MakePlan* failed: code %d \n",(int)result); exit (EXIT_FAILURE) ; } printf("The size of the worksize is %lu\n", worksize[0]); // Initialize transform array - to be split among GPU's and transformed in place using cufftX cudaLibXtDesc *u_prime; // Allocate data on multiple gpus using the cufft routines result = cufftXtMalloc(plan, (cudaLibXtDesc **)&u_prime, CUFFT_XT_FORMAT_INPLACE); if (result != CUFFT_SUCCESS) { printf ("*XtMalloc failed\n"); exit (EXIT_FAILURE) ; } // Copy the data from 'host' to device using cufftXt formatting result = cufftXtMemcpy(plan, u_prime, u, CUFFT_COPY_HOST_TO_DEVICE); if (result != CUFFT_SUCCESS) { printf ("*XtMemcpy failed, code: %d\n",result); exit (EXIT_FAILURE); } // Perform FFT on multiple GPUs printf("Forward 3d FFT on multiple GPUs\n"); result = cufftXtExecDescriptorZ2Z(plan, u_prime, u_prime, CUFFT_FORWARD); if (result != CUFFT_SUCCESS) { printf ("*XtExecZ2Z failed\n"); exit (EXIT_FAILURE); } ////////// Apparently re-ordering the data prior to the IFFT is not necessary (gives incorrect results)//////////////////// // cudaLibXtDesc *u_reorder; // result = cufftXtMalloc(plan, (cudaLibXtDesc **)&u_reorder, CUFFT_XT_FORMAT_INPLACE); // if (result != CUFFT_SUCCESS) { printf ("*XtMalloc failed\n"); exit (EXIT_FAILURE) ; } // // Re-order data on multiple GPUs to natural order // printf("Reordering the data on the GPUs\n"); // result = cufftXtMemcpy (plan, u_reorder, u_prime, CUFFT_COPY_DEVICE_TO_DEVICE); // if (result != CUFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); exit (EXIT_FAILURE); } ///////////////////////////////////////////////////////////////////////////////////////////// // Perform inverse FFT on multiple GPUs printf("Inverse 3d FFT on multiple GPUs\n"); result = cufftXtExecDescriptorZ2Z(plan, u_prime, u_prime, CUFFT_INVERSE); if (result != CUFFT_SUCCESS) { printf ("*XtExecZ2Z failed\n"); exit (EXIT_FAILURE); } // Copy the output data from multiple gpus to the 'host' result variable (automatically reorders the data from output to natural order) result = cufftXtMemcpy (plan, u_fft, u_prime, CUFFT_COPY_DEVICE_TO_HOST); if (result != CUFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); exit (EXIT_FAILURE); } // Scale output to match input (cuFFT does not automatically scale FFT output by 1/N) for (i = 0; i<nGPUs; ++i){ cudaSetDevice(deviceNum[i]); idx = i*NX_per_GPU*NY*NZ; // sets the index value of the data to send to each gpu scaleResult<<<gridSize, blockSize>>>(NX_per_GPU, &u_fft[idx]); } // Synchronize GPUs for (i = 0; 
i<nGPUs; ++i){ cudaSetDevice(deviceNum[i]); cudaDeviceSynchronize(); } // Test results to make sure that u = u_fft double error = 0.0; for (i = 0; i<NX; ++i){ for (j = 0; j<NY; ++j){ for (k = 0; k<NZ; ++k){ idx = k + j*NZ + NZ*NY*i; // error += (double)u[idx].x - sin(x)*cos(y)*cos(z); error += (double)u[idx].x - (double)u_fft[idx].x; // printf("At idx = %d, the value of the error is %f\n",idx,(double)u[idx].x - (double)u_fft[idx].x); // printf("At idx = %d, the value of the error is %f\n",idx,error); } } } printf("The sum of the error is %4.4g\n",error); // Deallocate variables // Free malloc'ed variables free(worksize); // Free cuda malloc'ed variables cudaFree(u); cudaFree(u_fft); // Free cufftX malloc'ed variables result = cufftXtFree(u_prime); if (result != CUFFT_SUCCESS) { printf ("*XtFree failed\n"); exit (EXIT_FAILURE); } // result = cufftXtFree(u_reorder); // if (result != CUFFT_SUCCESS) { printf ("*XtFree failed\n"); exit (EXIT_FAILURE); } // Destroy FFT plan result = cufftDestroy(plan); if (result != CUFFT_SUCCESS) { printf ("cufftDestroy failed: code %d\n",(int)result); exit (EXIT_FAILURE); } return 0; }
1965878f0cbb35c43a9fb3af462eec7335bb57db.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda/cuda_utils.h" #include <algorithm> #include <functional> #include <iostream> #include <inttypes.h> #include <iterator> #include <stdint.h> #include <typeinfo> #include <omp.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include "common.h" #include "cuda/cuda_custom_type.h" #include "omp/omp_utils.h" #ifdef __DO_BENCHMARK #include "timer.h" #endif namespace gpusort { // // Global timers. // #ifdef __DO_BENCHMARK Timer local_sort_mem_getinfo_tm; Timer local_sort_merge_tm; Timer local_sort_sort_tm; Timer local_sort_transfer_tm; #endif // // Declare internal functions. // template<typename K> static inline thrust::less<K> Convert2ThrustComp(std::less<K> comp) { return thrust::less<K>(); } template<typename K> static inline thrust::greater<K> Convert2ThrustComp(std::greater<K> comp) { return thrust::greater<K>(); } template<typename StrictWeakOrdering, typename DifferenceType, typename RandomAccessIterator> __host__ void ParallelMergeArrays(StrictWeakOrdering comp, int num_threads, int num_chunks, const DifferenceType *displs, RandomAccessIterator in, RandomAccessIterator out); void RaiseError(hipError_t err) throw(ThrustException); void SelectDevice(int rank) throw(ThrustException); template<typename StrictWeakOrdering, typename RandomAccessIterator> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator out) throw(ThrustException); template<typename StrictWeakOrdering, typename RandomAccessIterator1, typename RandomAccessIterator2, typename KeyValuePair> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator1 key_first, RandomAccessIterator1 key_last, RandomAccessIterator2 val_first, KeyValuePair *out) throw(ThrustException); void SplitChunks(int64_t num_elems, int64_t size, int64_t *chunk_size, int *num_chunks) throw(ThrustException); // // Implement Common API. // int CudaUtils::GetDeviceCount() throw(ThrustException) { int num_dev = 0; hipError_t cuda_state = hipGetDeviceCount(&num_dev); if (cuda_state != hipSuccess) { RaiseError(cuda_state); } return num_dev; } bool CudaUtils::IsHavingDevices() throw(ThrustException) { return (GetDeviceCount() > 0); } // // Implement Sort API. 
// template<typename StrictWeakOrdering, typename RandomAccessIterator> __host__ void CudaUtils::Sort( StrictWeakOrdering comp, int rank, RandomAccessIterator first, RandomAccessIterator last) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator>::difference_type DiffType; typedef typename std::iterator_traits<RandomAccessIterator>::value_type ValueType; #ifdef __DEBUG_MSG if (rank == 0) std::cout << "start CudaUtils::Sort" << std::endl; #endif int64_t size = last - first, chunk_size = 0; int num_chunks = 0; SelectDevice(rank); SplitChunks(size, sizeof(ValueType), &chunk_size, &num_chunks); int tree_height = ceil(log(num_chunks) / log(2)); if (num_chunks == 1) { SortByThrust(comp, first, last, first); } else { int64_t remainder = size % chunk_size; RandomAccessIterator buffer = new ValueType[size]; int64_t *displs = new DiffType[num_chunks+1]; displs[0] = 0; displs[num_chunks] = size; // sort part RandomAccessIterator tmp_first = first; RandomAccessIterator tmp_last = first + chunk_size; RandomAccessIterator tmp_buf_first = buffer; RandomAccessIterator tmp_buf_last = buffer + chunk_size; for (int i = 0; i < num_chunks; i++) { displs[i+1] = displs[i] + chunk_size; if (i == num_chunks-2 && remainder != 0) { chunk_size = remainder; } try { if (tree_height % 2 == 0) { SortByThrust(comp, tmp_first, tmp_last, tmp_first); } else { SortByThrust(comp, tmp_first, tmp_last, tmp_buf_first); } } catch (ThrustException &e) { delete[] buffer; delete[] displs; throw e; } tmp_first = tmp_last; tmp_last = tmp_first + chunk_size; tmp_buf_first = tmp_buf_last; tmp_buf_last = tmp_buf_first + chunk_size; } int max_threads = OmpUtils::GetMaxThreads(); #ifdef __DO_BENCHMARK local_sort_merge_tm.Start(); #endif if (tree_height % 2 == 0) { ParallelMergeArrays(comp, max_threads, num_chunks, displs, first, buffer); } else { ParallelMergeArrays(comp, max_threads, num_chunks, displs, buffer, first); } #ifdef __DO_BENCHMARK local_sort_merge_tm.Stop(); #endif delete[] buffer; delete[] displs; } } template<typename StrictWeakOrdering, typename RandomAccessIterator1, typename RandomAccessIterator2> __host__ void CudaUtils::SortByKey( StrictWeakOrdering comp, int rank, RandomAccessIterator1 key_first, RandomAccessIterator1 key_last, RandomAccessIterator2 val_first, void *_out) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator1>::difference_type DiffType; typedef typename std::iterator_traits<RandomAccessIterator1>::value_type KeyType; typedef typename std::iterator_traits<RandomAccessIterator2>::value_type ValueType; typedef std::pair<KeyType, ValueType> KeyValuePair; KeyValuePair *out = reinterpret_cast<KeyValuePair*>(_out); int64_t size = key_last - key_first, chunk_size = 0; int num_chunks = 0; SelectDevice(rank); SplitChunks(size, sizeof(ValueType) + sizeof(KeyType), &chunk_size, &num_chunks); int tree_height = ceil(log(num_chunks) / log(2)); if (num_chunks == 1) { SortByThrust(comp, key_first, key_last, val_first, out); } else { int64_t remainder = size % chunk_size; KeyValuePair *buffer = new KeyValuePair[size]; int64_t *displs = new DiffType[num_chunks+1]; displs[0] = 0; displs[num_chunks] = size; // sort part int64_t d1 = 0; int64_t d2 = d1 + chunk_size; for (int i = 0; i < num_chunks; i++) { try { if (tree_height % 2 == 0) { SortByThrust(comp, key_first + d1, key_first + d2, val_first + d1, out + d1); } else { SortByThrust(comp, key_first + d1, key_first + d2, val_first + d1, buffer + d1); } } catch (ThrustException &e) { delete[] buffer; delete[] 
displs; throw e; } if (i < num_chunks - 1) displs[i+1] = displs[i] + chunk_size; d1 += chunk_size; d2 = d1 + ((i == num_chunks-2 && remainder != 0)? remainder : chunk_size); } int max_threads = OmpUtils::GetMaxThreads(); #ifdef __DO_BENCHMARK local_sort_merge_tm.Start(); #endif if (tree_height % 2 == 0) { ParallelMergeArrays(ConvertComp<KeyValuePair>(comp), max_threads, num_chunks, displs, out, buffer); } else { ParallelMergeArrays(ConvertComp<KeyValuePair>(comp), max_threads, num_chunks, displs, buffer, out); } #ifdef __DO_BENCHMARK local_sort_merge_tm.Stop(); #endif delete[] buffer; delete[] displs; } } // // Pre-define template parameters for Sort function // to pass the compiler linkage. // #define DECLARE_SORT_TEMPLATE(key_t)\ template void CudaUtils::Sort<std::less<key_t>, key_t*>(\ std::less<key_t> comp, int rank,\ key_t* first, key_t* last) throw(ThrustException);\ template void CudaUtils::Sort<std::greater<key_t>, key_t*>(\ std::greater<key_t> comp, int rank,\ key_t* first, key_t* last) throw(ThrustException); DECLARE_SORT_TEMPLATE(int); // For primitive type: int. DECLARE_SORT_TEMPLATE(unsigned int); // For primitive type: unsigned int. DECLARE_SORT_TEMPLATE(float); // For primitive type: float. DECLARE_SORT_TEMPLATE(double); // For primitive type: double. DECLARE_SORT_TEMPLATE(int64_t); // For primitive type: int64_t. DECLARE_SORT_TEMPLATE(uint64_t); // For primitive type: uint64_t. DECLARE_SORT_TEMPLATE(CudaCustomType<int>); // For custom data type // (key type is int). DECLARE_SORT_TEMPLATE(CudaCustomType<unsigned int>); // For custom data type // (key type is uint). DECLARE_SORT_TEMPLATE(CudaCustomType<float>); // For custom data type // (key type is float). DECLARE_SORT_TEMPLATE(CudaCustomType<double>); // For custom data type // (key type is double). DECLARE_SORT_TEMPLATE(CudaCustomType<int64_t>); // For custom data type // (key type is int64_t). DECLARE_SORT_TEMPLATE(CudaCustomType<uint64_t>); // For custom data type // (key type is uint64_t). // // Pre-define template parameters for SortByKey function // to pass the compiler linkage. // #define DECLARE_SORT_BY_KEY_TEMPLATE(key_t, value_t)\ template void CudaUtils::SortByKey<std::less<key_t>, key_t*, value_t*>(\ std::less<key_t> comp, int rank, key_t* k_first, key_t* k_last,\ value_t* v_first, void *_out) throw(ThrustException);\ template void CudaUtils::SortByKey<std::greater<key_t>, key_t*, value_t*>(\ std::greater<key_t> comp, int rank, key_t* k_first, key_t* k_last,\ value_t* v_first, void *_out) throw(ThrustException); DECLARE_SORT_BY_KEY_TEMPLATE(int, int); DECLARE_SORT_BY_KEY_TEMPLATE(unsigned int, unsigned int); DECLARE_SORT_BY_KEY_TEMPLATE(float, float); DECLARE_SORT_BY_KEY_TEMPLATE(double, double); DECLARE_SORT_BY_KEY_TEMPLATE(int64_t, int64_t); DECLARE_SORT_BY_KEY_TEMPLATE(uint64_t, uint64_t); #define DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(type1, type2)\ DECLARE_SORT_BY_KEY_TEMPLATE(type1, type2);\ DECLARE_SORT_BY_KEY_TEMPLATE(type2, type1); // int pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, unsigned int); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, float); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, double); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, uint64_t); // unsigned int pair with the remainders. 
DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, float); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, double); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, uint64_t); // float pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(float, double); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(float, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(float, uint64_t); // double pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(double, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(double, uint64_t); // int64_t pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int64_t, uint64_t); // // Implement internal functions. // template<typename StrictWeakOrdering, typename DifferenceType, typename RandomAccessIterator> __host__ void ParallelMergeArrays(StrictWeakOrdering comp, int num_threads, int num_chunks, const DifferenceType *displs, RandomAccessIterator in, RandomAccessIterator out) { int current_height = 0; int num_merge = (num_chunks + 2 - 1) / 2; int stride = 1; while (num_merge > 0) { RandomAccessIterator first1, last1, first2, last2, first3; for (int i = 0; i < num_merge; i++) { int offset_idx = i * stride * 2; int idx_first1 = (offset_idx > num_chunks)? num_chunks : offset_idx; int idx_last1 = offset_idx + stride; idx_last1 = (idx_last1 > num_chunks)? num_chunks : idx_last1; int idx_first2 = offset_idx + stride; idx_first2 = (idx_first2 > num_chunks)? num_chunks : idx_first2; int idx_last2 = offset_idx + (2 * stride); idx_last2 = (idx_last2 > num_chunks)? num_chunks : idx_last2; int idx_first3 = offset_idx; if (current_height % 2 == 0) { first1 = &in[displs[idx_first1]]; last1 = &in[displs[idx_last1]]; first2 = &in[displs[idx_first2]]; last2 = &in[displs[idx_last2]]; first3 = &out[displs[idx_first3]]; } else if (current_height % 2 == 1) { first1 = &out[displs[idx_first1]]; last1 = &out[displs[idx_last1]]; first2 = &out[displs[idx_first2]]; last2 = &out[displs[idx_last2]]; first3 = &in[displs[idx_first3]]; } OmpUtils::Merge(comp, first1, last1, first2, last2, first3); } if (num_merge == 1) { num_merge = 0; } else { num_merge = (num_merge + 2 - 1) / 2; stride = stride * 2; current_height++; } } } void RaiseError(hipError_t err) throw(ThrustException) { switch (err) { case hipErrorDeviceAlreadyInUse: throw ThrustException("A call tried to access an exclusive-thread device" " that is already in use by a different thread"); case hipErrorInsufficientDriver: throw ThrustException("The installed CUDA driver is older than" " the CUDA runtime library"); case hipErrorInvalidDevice: throw ThrustException("The device ordinal supplied by the user does not" " correspond to a valid CUDA device"); case hipErrorNoDevice: throw ThrustException("No CUDA-capable devices were detected"); default: throw ThrustException("Unknown error occurred"); } } void SelectDevice(int rank) throw(ThrustException) { int num_dev = CudaUtils::GetDeviceCount(); hipError_t cuda_state = hipSetDevice((rank + 1) % num_dev); if (cuda_state != hipSuccess) { RaiseError(cuda_state); } } template<typename StrictWeakOrdering, typename RandomAccessIterator> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator out) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator>::value_type ValueType; #ifdef __DO_BENCHMARK local_sort_transfer_tm.Start(); #endif thrust::device_vector<ValueType> d_v(0); try { d_v.assign(first, last); } catch(std::bad_alloc &e) { throw ThrustException("Couldn't 
allocate device vector"); } #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); local_sort_sort_tm.Start(); #endif try { thrust::sort(d_v.begin(), d_v.end(), Convert2ThrustComp(comp)); } catch (std::bad_alloc &e) { throw ThrustException("Ran out of memory while sorting"); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during sort: " + std::string(e.what())); } #ifdef __DO_BENCHMARK local_sort_sort_tm.Stop(); local_sort_transfer_tm.Start(); #endif try { thrust::copy(d_v.begin(), d_v.end(), out); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during copy: " + std::string(e.what())); } #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); #endif } template<typename StrictWeakOrdering, typename RandomAccessIterator1, typename RandomAccessIterator2, typename KeyValuePair> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator1 key_first, RandomAccessIterator1 key_last, RandomAccessIterator2 val_first, KeyValuePair *out) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator1>::value_type KeyType; typedef typename std::iterator_traits<RandomAccessIterator2>::value_type ValueType; int64_t nelem = key_last - key_first; #ifdef __DO_BENCHMARK local_sort_transfer_tm.Start(); #endif thrust::device_vector<KeyType> d_k(0); thrust::device_vector<ValueType> d_v(0); try { d_k.assign(key_first, key_last); d_v.assign(val_first, val_first + nelem); } catch(std::bad_alloc &e) { throw ThrustException("Couldn't allocate device vector"); } #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); local_sort_sort_tm.Start(); #endif try { thrust::sort_by_key(d_k.begin(), d_k.end(), d_v.begin(), Convert2ThrustComp(comp)); } catch (std::bad_alloc &e) { throw ThrustException("Ran out of memory while sorting"); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during sort: " + std::string(e.what())); } #ifdef __DO_BENCHMARK local_sort_sort_tm.Stop(); local_sort_transfer_tm.Start(); #endif try { thrust::copy(d_k.begin(), d_k.end(), key_first); thrust::copy(d_v.begin(), d_v.end(), val_first); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during copy: " + std::string(e.what())); } int n_threads = OmpUtils::GetMaxThreads(); FOR_PARALLEL(n_threads, nelem, i, { out[i] = std::make_pair(key_first[i], val_first[i]); }); #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); #endif } void SplitChunks(int64_t num_elems, int64_t size, int64_t *chunk_size, int *num_chunks) throw(ThrustException) { size_t mem_avai = 1, mem_total = 1; local_sort_mem_getinfo_tm.Start(); hipError_t cuda_state = hipMemGetInfo(&mem_avai, &mem_total); local_sort_mem_getinfo_tm.Stop(); if (cuda_state != hipSuccess) RaiseError(cuda_state); int64_t chunk_size_ = mem_avai / (3 * size); int num_chunks_ = (num_elems + chunk_size_ - 1) / chunk_size_; *chunk_size = chunk_size_; *num_chunks = num_chunks_; } } // namespace gpusort
1965878f0cbb35c43a9fb3af462eec7335bb57db.cu
#include "cuda/cuda_utils.h" #include <algorithm> #include <functional> #include <iostream> #include <inttypes.h> #include <iterator> #include <stdint.h> #include <typeinfo> #include <omp.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include "common.h" #include "cuda/cuda_custom_type.h" #include "omp/omp_utils.h" #ifdef __DO_BENCHMARK #include "timer.h" #endif namespace gpusort { // // Global timers. // #ifdef __DO_BENCHMARK Timer local_sort_mem_getinfo_tm; Timer local_sort_merge_tm; Timer local_sort_sort_tm; Timer local_sort_transfer_tm; #endif // // Declare internal functions. // template<typename K> static inline thrust::less<K> Convert2ThrustComp(std::less<K> comp) { return thrust::less<K>(); } template<typename K> static inline thrust::greater<K> Convert2ThrustComp(std::greater<K> comp) { return thrust::greater<K>(); } template<typename StrictWeakOrdering, typename DifferenceType, typename RandomAccessIterator> __host__ void ParallelMergeArrays(StrictWeakOrdering comp, int num_threads, int num_chunks, const DifferenceType *displs, RandomAccessIterator in, RandomAccessIterator out); void RaiseError(cudaError_t err) throw(ThrustException); void SelectDevice(int rank) throw(ThrustException); template<typename StrictWeakOrdering, typename RandomAccessIterator> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator out) throw(ThrustException); template<typename StrictWeakOrdering, typename RandomAccessIterator1, typename RandomAccessIterator2, typename KeyValuePair> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator1 key_first, RandomAccessIterator1 key_last, RandomAccessIterator2 val_first, KeyValuePair *out) throw(ThrustException); void SplitChunks(int64_t num_elems, int64_t size, int64_t *chunk_size, int *num_chunks) throw(ThrustException); // // Implement Common API. // int CudaUtils::GetDeviceCount() throw(ThrustException) { int num_dev = 0; cudaError_t cuda_state = cudaGetDeviceCount(&num_dev); if (cuda_state != cudaSuccess) { RaiseError(cuda_state); } return num_dev; } bool CudaUtils::IsHavingDevices() throw(ThrustException) { return (GetDeviceCount() > 0); } // // Implement Sort API. 
// template<typename StrictWeakOrdering, typename RandomAccessIterator> __host__ void CudaUtils::Sort( StrictWeakOrdering comp, int rank, RandomAccessIterator first, RandomAccessIterator last) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator>::difference_type DiffType; typedef typename std::iterator_traits<RandomAccessIterator>::value_type ValueType; #ifdef __DEBUG_MSG if (rank == 0) std::cout << "start CudaUtils::Sort" << std::endl; #endif int64_t size = last - first, chunk_size = 0; int num_chunks = 0; SelectDevice(rank); SplitChunks(size, sizeof(ValueType), &chunk_size, &num_chunks); int tree_height = ceil(log(num_chunks) / log(2)); if (num_chunks == 1) { SortByThrust(comp, first, last, first); } else { int64_t remainder = size % chunk_size; RandomAccessIterator buffer = new ValueType[size]; int64_t *displs = new DiffType[num_chunks+1]; displs[0] = 0; displs[num_chunks] = size; // sort part RandomAccessIterator tmp_first = first; RandomAccessIterator tmp_last = first + chunk_size; RandomAccessIterator tmp_buf_first = buffer; RandomAccessIterator tmp_buf_last = buffer + chunk_size; for (int i = 0; i < num_chunks; i++) { displs[i+1] = displs[i] + chunk_size; if (i == num_chunks-2 && remainder != 0) { chunk_size = remainder; } try { if (tree_height % 2 == 0) { SortByThrust(comp, tmp_first, tmp_last, tmp_first); } else { SortByThrust(comp, tmp_first, tmp_last, tmp_buf_first); } } catch (ThrustException &e) { delete[] buffer; delete[] displs; throw e; } tmp_first = tmp_last; tmp_last = tmp_first + chunk_size; tmp_buf_first = tmp_buf_last; tmp_buf_last = tmp_buf_first + chunk_size; } int max_threads = OmpUtils::GetMaxThreads(); #ifdef __DO_BENCHMARK local_sort_merge_tm.Start(); #endif if (tree_height % 2 == 0) { ParallelMergeArrays(comp, max_threads, num_chunks, displs, first, buffer); } else { ParallelMergeArrays(comp, max_threads, num_chunks, displs, buffer, first); } #ifdef __DO_BENCHMARK local_sort_merge_tm.Stop(); #endif delete[] buffer; delete[] displs; } } template<typename StrictWeakOrdering, typename RandomAccessIterator1, typename RandomAccessIterator2> __host__ void CudaUtils::SortByKey( StrictWeakOrdering comp, int rank, RandomAccessIterator1 key_first, RandomAccessIterator1 key_last, RandomAccessIterator2 val_first, void *_out) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator1>::difference_type DiffType; typedef typename std::iterator_traits<RandomAccessIterator1>::value_type KeyType; typedef typename std::iterator_traits<RandomAccessIterator2>::value_type ValueType; typedef std::pair<KeyType, ValueType> KeyValuePair; KeyValuePair *out = reinterpret_cast<KeyValuePair*>(_out); int64_t size = key_last - key_first, chunk_size = 0; int num_chunks = 0; SelectDevice(rank); SplitChunks(size, sizeof(ValueType) + sizeof(KeyType), &chunk_size, &num_chunks); int tree_height = ceil(log(num_chunks) / log(2)); if (num_chunks == 1) { SortByThrust(comp, key_first, key_last, val_first, out); } else { int64_t remainder = size % chunk_size; KeyValuePair *buffer = new KeyValuePair[size]; int64_t *displs = new DiffType[num_chunks+1]; displs[0] = 0; displs[num_chunks] = size; // sort part int64_t d1 = 0; int64_t d2 = d1 + chunk_size; for (int i = 0; i < num_chunks; i++) { try { if (tree_height % 2 == 0) { SortByThrust(comp, key_first + d1, key_first + d2, val_first + d1, out + d1); } else { SortByThrust(comp, key_first + d1, key_first + d2, val_first + d1, buffer + d1); } } catch (ThrustException &e) { delete[] buffer; delete[] 
displs; throw e; } if (i < num_chunks - 1) displs[i+1] = displs[i] + chunk_size; d1 += chunk_size; d2 = d1 + ((i == num_chunks-2 && remainder != 0)? remainder : chunk_size); } int max_threads = OmpUtils::GetMaxThreads(); #ifdef __DO_BENCHMARK local_sort_merge_tm.Start(); #endif if (tree_height % 2 == 0) { ParallelMergeArrays(ConvertComp<KeyValuePair>(comp), max_threads, num_chunks, displs, out, buffer); } else { ParallelMergeArrays(ConvertComp<KeyValuePair>(comp), max_threads, num_chunks, displs, buffer, out); } #ifdef __DO_BENCHMARK local_sort_merge_tm.Stop(); #endif delete[] buffer; delete[] displs; } } // // Pre-define template parameters for Sort function // to pass the compiler linkage. // #define DECLARE_SORT_TEMPLATE(key_t)\ template void CudaUtils::Sort<std::less<key_t>, key_t*>(\ std::less<key_t> comp, int rank,\ key_t* first, key_t* last) throw(ThrustException);\ template void CudaUtils::Sort<std::greater<key_t>, key_t*>(\ std::greater<key_t> comp, int rank,\ key_t* first, key_t* last) throw(ThrustException); DECLARE_SORT_TEMPLATE(int); // For primitive type: int. DECLARE_SORT_TEMPLATE(unsigned int); // For primitive type: unsigned int. DECLARE_SORT_TEMPLATE(float); // For primitive type: float. DECLARE_SORT_TEMPLATE(double); // For primitive type: double. DECLARE_SORT_TEMPLATE(int64_t); // For primitive type: int64_t. DECLARE_SORT_TEMPLATE(uint64_t); // For primitive type: uint64_t. DECLARE_SORT_TEMPLATE(CudaCustomType<int>); // For custom data type // (key type is int). DECLARE_SORT_TEMPLATE(CudaCustomType<unsigned int>); // For custom data type // (key type is uint). DECLARE_SORT_TEMPLATE(CudaCustomType<float>); // For custom data type // (key type is float). DECLARE_SORT_TEMPLATE(CudaCustomType<double>); // For custom data type // (key type is double). DECLARE_SORT_TEMPLATE(CudaCustomType<int64_t>); // For custom data type // (key type is int64_t). DECLARE_SORT_TEMPLATE(CudaCustomType<uint64_t>); // For custom data type // (key type is uint64_t). // // Pre-define template parameters for SortByKey function // to pass the compiler linkage. // #define DECLARE_SORT_BY_KEY_TEMPLATE(key_t, value_t)\ template void CudaUtils::SortByKey<std::less<key_t>, key_t*, value_t*>(\ std::less<key_t> comp, int rank, key_t* k_first, key_t* k_last,\ value_t* v_first, void *_out) throw(ThrustException);\ template void CudaUtils::SortByKey<std::greater<key_t>, key_t*, value_t*>(\ std::greater<key_t> comp, int rank, key_t* k_first, key_t* k_last,\ value_t* v_first, void *_out) throw(ThrustException); DECLARE_SORT_BY_KEY_TEMPLATE(int, int); DECLARE_SORT_BY_KEY_TEMPLATE(unsigned int, unsigned int); DECLARE_SORT_BY_KEY_TEMPLATE(float, float); DECLARE_SORT_BY_KEY_TEMPLATE(double, double); DECLARE_SORT_BY_KEY_TEMPLATE(int64_t, int64_t); DECLARE_SORT_BY_KEY_TEMPLATE(uint64_t, uint64_t); #define DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(type1, type2)\ DECLARE_SORT_BY_KEY_TEMPLATE(type1, type2);\ DECLARE_SORT_BY_KEY_TEMPLATE(type2, type1); // int pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, unsigned int); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, float); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, double); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int, uint64_t); // unsigned int pair with the remainders. 
DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, float); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, double); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(unsigned int, uint64_t); // float pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(float, double); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(float, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(float, uint64_t); // double pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(double, int64_t); DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(double, uint64_t); // int64_t pair with the remainders. DECLARE_SORT_BY_KEY_PAIR_TEMPLATE(int64_t, uint64_t); // // Implement internal functions. // template<typename StrictWeakOrdering, typename DifferenceType, typename RandomAccessIterator> __host__ void ParallelMergeArrays(StrictWeakOrdering comp, int num_threads, int num_chunks, const DifferenceType *displs, RandomAccessIterator in, RandomAccessIterator out) { int current_height = 0; int num_merge = (num_chunks + 2 - 1) / 2; int stride = 1; while (num_merge > 0) { RandomAccessIterator first1, last1, first2, last2, first3; for (int i = 0; i < num_merge; i++) { int offset_idx = i * stride * 2; int idx_first1 = (offset_idx > num_chunks)? num_chunks : offset_idx; int idx_last1 = offset_idx + stride; idx_last1 = (idx_last1 > num_chunks)? num_chunks : idx_last1; int idx_first2 = offset_idx + stride; idx_first2 = (idx_first2 > num_chunks)? num_chunks : idx_first2; int idx_last2 = offset_idx + (2 * stride); idx_last2 = (idx_last2 > num_chunks)? num_chunks : idx_last2; int idx_first3 = offset_idx; if (current_height % 2 == 0) { first1 = &in[displs[idx_first1]]; last1 = &in[displs[idx_last1]]; first2 = &in[displs[idx_first2]]; last2 = &in[displs[idx_last2]]; first3 = &out[displs[idx_first3]]; } else if (current_height % 2 == 1) { first1 = &out[displs[idx_first1]]; last1 = &out[displs[idx_last1]]; first2 = &out[displs[idx_first2]]; last2 = &out[displs[idx_last2]]; first3 = &in[displs[idx_first3]]; } OmpUtils::Merge(comp, first1, last1, first2, last2, first3); } if (num_merge == 1) { num_merge = 0; } else { num_merge = (num_merge + 2 - 1) / 2; stride = stride * 2; current_height++; } } } void RaiseError(cudaError_t err) throw(ThrustException) { switch (err) { case cudaErrorDeviceAlreadyInUse: throw ThrustException("A call tried to access an exclusive-thread device" " that is already in use by a different thread"); case cudaErrorInsufficientDriver: throw ThrustException("The installed CUDA driver is older than" " the CUDA runtime library"); case cudaErrorInvalidDevice: throw ThrustException("The device ordinal supplied by the user does not" " correspond to a valid CUDA device"); case cudaErrorNoDevice: throw ThrustException("No CUDA-capable devices were detected"); default: throw ThrustException("Unknown error occurred"); } } void SelectDevice(int rank) throw(ThrustException) { int num_dev = CudaUtils::GetDeviceCount(); cudaError_t cuda_state = cudaSetDevice((rank + 1) % num_dev); if (cuda_state != cudaSuccess) { RaiseError(cuda_state); } } template<typename StrictWeakOrdering, typename RandomAccessIterator> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator out) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator>::value_type ValueType; #ifdef __DO_BENCHMARK local_sort_transfer_tm.Start(); #endif thrust::device_vector<ValueType> d_v(0); try { d_v.assign(first, last); } catch(std::bad_alloc &e) { throw 
ThrustException("Couldn't allocate device vector"); } #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); local_sort_sort_tm.Start(); #endif try { thrust::sort(d_v.begin(), d_v.end(), Convert2ThrustComp(comp)); } catch (std::bad_alloc &e) { throw ThrustException("Ran out of memory while sorting"); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during sort: " + std::string(e.what())); } #ifdef __DO_BENCHMARK local_sort_sort_tm.Stop(); local_sort_transfer_tm.Start(); #endif try { thrust::copy(d_v.begin(), d_v.end(), out); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during copy: " + std::string(e.what())); } #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); #endif } template<typename StrictWeakOrdering, typename RandomAccessIterator1, typename RandomAccessIterator2, typename KeyValuePair> void SortByThrust(StrictWeakOrdering comp, RandomAccessIterator1 key_first, RandomAccessIterator1 key_last, RandomAccessIterator2 val_first, KeyValuePair *out) throw(ThrustException) { typedef typename std::iterator_traits<RandomAccessIterator1>::value_type KeyType; typedef typename std::iterator_traits<RandomAccessIterator2>::value_type ValueType; int64_t nelem = key_last - key_first; #ifdef __DO_BENCHMARK local_sort_transfer_tm.Start(); #endif thrust::device_vector<KeyType> d_k(0); thrust::device_vector<ValueType> d_v(0); try { d_k.assign(key_first, key_last); d_v.assign(val_first, val_first + nelem); } catch(std::bad_alloc &e) { throw ThrustException("Couldn't allocate device vector"); } #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); local_sort_sort_tm.Start(); #endif try { thrust::sort_by_key(d_k.begin(), d_k.end(), d_v.begin(), Convert2ThrustComp(comp)); } catch (std::bad_alloc &e) { throw ThrustException("Ran out of memory while sorting"); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during sort: " + std::string(e.what())); } #ifdef __DO_BENCHMARK local_sort_sort_tm.Stop(); local_sort_transfer_tm.Start(); #endif try { thrust::copy(d_k.begin(), d_k.end(), key_first); thrust::copy(d_v.begin(), d_v.end(), val_first); } catch (thrust::system_error &e) { throw ThrustException("Some other error happened during copy: " + std::string(e.what())); } int n_threads = OmpUtils::GetMaxThreads(); FOR_PARALLEL(n_threads, nelem, i, { out[i] = std::make_pair(key_first[i], val_first[i]); }); #ifdef __DO_BENCHMARK local_sort_transfer_tm.Stop(); #endif } void SplitChunks(int64_t num_elems, int64_t size, int64_t *chunk_size, int *num_chunks) throw(ThrustException) { size_t mem_avai = 1, mem_total = 1; local_sort_mem_getinfo_tm.Start(); cudaError_t cuda_state = cudaMemGetInfo(&mem_avai, &mem_total); local_sort_mem_getinfo_tm.Stop(); if (cuda_state != cudaSuccess) RaiseError(cuda_state); int64_t chunk_size_ = mem_avai / (3 * size); int num_chunks_ = (num_elems + chunk_size_ - 1) / chunk_size_; *chunk_size = chunk_size_; *num_chunks = num_chunks_; } } // namespace gpusort
76796c88c0407d76de4aab1e9418ba75df551a01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * @Author: grantmcgovern * @Date: 2015-09-11 12:19:51 * @Last Modified by: grantmcgovern * @Last Modified time: 2015-09-13 15:42:09 */ #include <stdio.h> #include <stdlib.h> #include <string.h> /* * File_Packet * * Contains a small data packet of * the file info (data + size) to * help with dynamic allocation. * */ struct File_Packet { char *file_data; int file_size; }; /* * get_filename_length(char *[]) * * Computes the length of command line * argument filename to store the filename * as a string, and null-terminate it. * */ int get_filename_length(char *filename[]) { int i = 0; while(filename[1][i] != '\0') i++; return i; } /* * check_command_line_args(int) * * Checks to see whether used used proper * number of command line arguments. */ void check_command_line_args(int argc) { // Ensure command line args are limited to only 1 if(argc > 2 || argc == 1) { printf("Invalid Number of Arguments\n"); exit(1); } } /* * read_encrypted_file(char*, int) * * Takes command line args passed from main * and opens the file, reading the data, then * bulding a character array. */ struct File_Packet read_encrypted_file(char *args[], int length) { int filename_length = get_filename_length(args); char filename[filename_length + 1]; // Null terminate the end to ensure no weird chars filename[filename_length] = '\0'; // Prevents buffer overflow, copies filename strncpy(filename, args[1], filename_length); /* * Read in file content, use fseek() * to get file size, and dynamically * allocate a string. */ FILE *file = fopen(filename, "rb"); // Check if file exits if(file) { fseek(file, 0, SEEK_END); long file_size = ftell(file); fseek(file, 0, SEEK_SET); char *file_data = (char *)(malloc(file_size + 1)); fread(file_data, file_size, 1, file); fclose(file); file_data[file_size] = 0; struct File_Packet packet; packet.file_data = file_data; packet.file_size = file_size; return packet; } else { printf("%s\n", "File does not exist"); exit(1); } } /* * caesar_cipher(char*) * * Takes a character array of the file contents * and converts each character to its decrypted * state by first casting to int, decrementing by * 1, then casting back to a char. */ __global__ void caesar_cipher(char *file_data, char *dev_decrypted_file_data) { // Get thread id as the index int i = threadIdx.x; // Cast to int int to_int = (int)file_data[i]; // Decrement and cast back to char char decrypted = (char)(to_int - 1); // Store the answer dev_decrypted_file_data[i] = decrypted; } /* * print_decrypted_message(char *) * * Recieves the memory block back from CUDA, * and prints the decrypted message. 
*/ void print_decrypted_message(char *decrypted_file_data) { printf("%s\n", decrypted_file_data); exit(0); } /* * MAIN */ int main(int argc, char *argv[]) { // First check command line args are valid check_command_line_args(argc); // Get file contents struct File_Packet packet = read_encrypted_file(argv, argc); // Get file length (chars) int file_size = packet.file_size; // Compute size of memory block we'll need int size = file_size * sizeof(char); // Local memory char *file_data = packet.file_data; char decrypted_file_data[file_size]; // Device memory char *dev_file_data; char *dev_decrypted_file_data; // Allocate memory on the GPU hipMalloc((void**)&dev_file_data, size); hipMalloc((void**)&dev_decrypted_file_data, size); hipMemcpy(dev_file_data, file_data, size, hipMemcpyHostToDevice); // Decrypt the message on the GPU hipLaunchKernelGGL(( caesar_cipher), dim3(1), dim3(file_size), 0, 0, dev_file_data, dev_decrypted_file_data); // Not sure if we need this, since we're only running on 1 thread hipDeviceSynchronize(); hipMemcpy(decrypted_file_data, dev_decrypted_file_data, size, hipMemcpyDeviceToHost); // Check we've decrypted print_decrypted_message(decrypted_file_data); // Deallocate memory on CUDA hipFree(dev_file_data); hipFree(dev_decrypted_file_data); // Exit with success exit(0); }
76796c88c0407d76de4aab1e9418ba75df551a01.cu
/* * @Author: grantmcgovern * @Date: 2015-09-11 12:19:51 * @Last Modified by: grantmcgovern * @Last Modified time: 2015-09-13 15:42:09 */ #include <stdio.h> #include <stdlib.h> #include <string.h> /* * File_Packet * * Contains a small data packet of * the file info (data + size) to * help with dynamic allocation. * */ struct File_Packet { char *file_data; int file_size; }; /* * get_filename_length(char *[]) * * Computes the length of command line * argument filename to store the filename * as a string, and null-terminate it. * */ int get_filename_length(char *filename[]) { int i = 0; while(filename[1][i] != '\0') i++; return i; } /* * check_command_line_args(int) * * Checks to see whether used used proper * number of command line arguments. */ void check_command_line_args(int argc) { // Ensure command line args are limited to only 1 if(argc > 2 || argc == 1) { printf("Invalid Number of Arguments\n"); exit(1); } } /* * read_encrypted_file(char*, int) * * Takes command line args passed from main * and opens the file, reading the data, then * bulding a character array. */ struct File_Packet read_encrypted_file(char *args[], int length) { int filename_length = get_filename_length(args); char filename[filename_length + 1]; // Null terminate the end to ensure no weird chars filename[filename_length] = '\0'; // Prevents buffer overflow, copies filename strncpy(filename, args[1], filename_length); /* * Read in file content, use fseek() * to get file size, and dynamically * allocate a string. */ FILE *file = fopen(filename, "rb"); // Check if file exits if(file) { fseek(file, 0, SEEK_END); long file_size = ftell(file); fseek(file, 0, SEEK_SET); char *file_data = (char *)(malloc(file_size + 1)); fread(file_data, file_size, 1, file); fclose(file); file_data[file_size] = 0; struct File_Packet packet; packet.file_data = file_data; packet.file_size = file_size; return packet; } else { printf("%s\n", "File does not exist"); exit(1); } } /* * caesar_cipher(char*) * * Takes a character array of the file contents * and converts each character to its decrypted * state by first casting to int, decrementing by * 1, then casting back to a char. */ __global__ void caesar_cipher(char *file_data, char *dev_decrypted_file_data) { // Get thread id as the index int i = threadIdx.x; // Cast to int int to_int = (int)file_data[i]; // Decrement and cast back to char char decrypted = (char)(to_int - 1); // Store the answer dev_decrypted_file_data[i] = decrypted; } /* * print_decrypted_message(char *) * * Recieves the memory block back from CUDA, * and prints the decrypted message. 
*/ void print_decrypted_message(char *decrypted_file_data) { printf("%s\n", decrypted_file_data); exit(0); } /* * MAIN */ int main(int argc, char *argv[]) { // First check command line args are valid check_command_line_args(argc); // Get file contents struct File_Packet packet = read_encrypted_file(argv, argc); // Get file length (chars) int file_size = packet.file_size; // Compute size of memory block we'll need int size = file_size * sizeof(char); // Local memory char *file_data = packet.file_data; char decrypted_file_data[file_size]; // Device memory char *dev_file_data; char *dev_decrypted_file_data; // Allocate memory on the GPU cudaMalloc((void**)&dev_file_data, size); cudaMalloc((void**)&dev_decrypted_file_data, size); cudaMemcpy(dev_file_data, file_data, size, cudaMemcpyHostToDevice); // Decrypt the message on the GPU caesar_cipher<<<1, file_size>>>(dev_file_data, dev_decrypted_file_data); // Not sure if we need this, since we're only running on 1 thread cudaThreadSynchronize(); cudaMemcpy(decrypted_file_data, dev_decrypted_file_data, size, cudaMemcpyDeviceToHost); // Check we've decrypted print_decrypted_message(decrypted_file_data); // Deallocate memory on CUDA cudaFree(dev_file_data); cudaFree(dev_decrypted_file_data); // Exit with success exit(0); }
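For the cipher pair above, the only GPU-specific work is a one-block kernel that shifts every byte down by one. The sketch below is a minimal, self-contained CUDA variant of that idea, not the files themselves: the hard-coded message, the terminator handling, and the single-block limit of 1024 threads are illustrative assumptions.

#include <cstdio>
#include <cuda_runtime.h>

// Shift every byte down by one; keep the NUL terminator intact (an assumption
// added here so the host can print the result safely).
__global__ void shift_down(const char *in, char *out, int n) {
  int i = threadIdx.x;                 // one block: thread index == byte index
  if (i < n) out[i] = in[i] ? (char)(in[i] - 1) : '\0';
}

int main() {
  const char msg[] = "Ifmmp-!xpsme";   // "Hello, world" shifted up by one
  const int n = (int)sizeof(msg);      // includes the terminator

  char *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n);
  cudaMalloc(&d_out, n);
  cudaMemcpy(d_in, msg, n, cudaMemcpyHostToDevice);

  shift_down<<<1, n>>>(d_in, d_out, n);   // valid only while n <= 1024
  cudaDeviceSynchronize();

  char decoded[sizeof(msg)];
  cudaMemcpy(decoded, d_out, n, cudaMemcpyDeviceToHost);
  std::printf("%s\n", decoded);           // prints "Hello, world"

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

The .hip version of the pair shows how hipify rewrites exactly this launch: caesar_cipher<<<1, file_size>>> becomes hipLaunchKernelGGL with dim3 arguments, and the cuda* runtime calls become their hip* counterparts.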
24d66dd8dba54e14ca08bec1b19ae37d4f127a06.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "qsort_cuda.cuh"

#include <cstdio>
#include <cstdlib>

void run_qsort(unsigned int* data, unsigned int nitems) {
  CHECK_CUDA_ERR(hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH));

  int left = 0;
  int right = nitems - 1;
  std::fprintf(stdout, "Launching kernel on the GPU\n");
  hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, 0, data, left, right, 0);
  CHECK_CUDA_ERR(hipDeviceSynchronize());
}

void initialize_data(unsigned int* dst, unsigned int nitems) {
  srand(2047);
  for (unsigned i = 0; i < nitems; i++)
    dst[i] = rand() % nitems;
}

int main() {
  int num_items;
  std::fprintf(stdout, "Type in the number of items: ");
  std::fscanf(stdin, "%d", &num_items);

  int device_count = 0, device = -1;
  CHECK_CUDA_ERR(hipGetDeviceCount(&device_count));
  for (int i = 0; i < device_count; ++i) {
    hipDeviceProp_t properties;
    CHECK_CUDA_ERR(hipGetDeviceProperties(&properties, i));
    if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5)) {
      device = i;
      std::fprintf(stdout, "Running on GPU %d (%s)\n", i, properties.name);
      break;
    }
    std::fprintf(stdout, "GPU %d (%s) does not support CUDA Dynamic Parallelism\n", i, properties.name);
  }
  if (device == -1) {
    std::fprintf(stderr, "QSortCUDASimple requires GPU devices with compute SM 3.5 or higher. Exiting...\n");
    exit(EXIT_FAILURE);
  }
  hipSetDevice(device);

  unsigned int *d_data = 0;
  CHECK_CUDA_ERR(hipMallocManaged((void **)&d_data, num_items * sizeof(unsigned int)));

  std::fprintf(stdout, "Initializing data:\n");
  initialize_data(d_data, num_items);
  for (int i = 0; i < num_items; i++)
    std::fprintf(stdout, "Data [%u]: \n", d_data[i]);

  std::fprintf(stdout, "Running quicksort on %d elements\n", num_items);
  run_qsort(d_data, num_items);
  for (int i = 0; i < num_items; i++)
    std::fprintf(stdout, "Data [%u]: \n", d_data[i]);

  exit(EXIT_SUCCESS);
}
24d66dd8dba54e14ca08bec1b19ae37d4f127a06.cu
#include "qsort_cuda.cuh" #include <cstdio> #include <cstdlib> void run_qsort(unsigned int* data, unsigned int nitems) { CHECK_CUDA_ERR(cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH)); int left = 0; int right = nitems - 1; std::fprintf(stdout, "Launching kernel on the GPU\n"); cdp_simple_quicksort<<<1, 1>>>(data, left, right, 0); CHECK_CUDA_ERR(cudaDeviceSynchronize()); } void initialize_data(unsigned int* dst, unsigned int nitems) { srand(2047); for (unsigned i = 0; i < nitems; i++) dst[i] = rand() % nitems; } int main() { int num_items; std::fprintf(stdout, "Type in the number of items: "); std::fscanf(stdin, "%d", &num_items); int device_count = 0, device = -1; CHECK_CUDA_ERR(cudaGetDeviceCount(&device_count)); for (int i = 0; i < device_count; ++i) { cudaDeviceProp properties; CHECK_CUDA_ERR(cudaGetDeviceProperties(&properties, i)); if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5)) { device = i; std::fprintf(stdout, "Running on GPU %d (%s)\n", i, properties.name); break; } std::fprintf(stdout, "GPU %d (%s) does not support CUDA Dynamic Parallelism\n", i, properties.name); } if (device == -1) { std::fprintf(stderr, "QSortCUDASimple requires GPU devices with compute SM 3.5 or higher. Exiting...\n"); exit(EXIT_FAILURE); } cudaSetDevice(device); unsigned int *d_data = 0; CHECK_CUDA_ERR(cudaMallocManaged((void **)&d_data, num_items * sizeof(unsigned int))); std::fprintf(stdout, "Initializing data:\n"); initialize_data(d_data, num_items); for (int i = 0; i < num_items; i++) std::fprintf(stdout, "Data [%u]: \n", d_data[i]); std::fprintf(stdout, "Running quicksort on %d elements\n", num_items); run_qsort(d_data, num_items); for (int i = 0; i < num_items; i++) std::fprintf(stdout, "Data [%u]: \n", d_data[i]); exit(EXIT_SUCCESS); }
07a10459a55dc5b48723665d6c78584fa8721916.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void dtranspose_device( int m, int n, const double *A, int lda, double *AT, int ldat) { __shared__ double sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void dtranspose_kernel( int m, int n, const double *A, int lda, double *AT, int ldat) { dtranspose_device(m, n, A, lda, AT, ldat); } __global__ void dtranspose_kernel_batched( int m, int n, double **dA_array, int lda, double **dAT_array, int ldat) { int batchid = blockIdx.z; dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- dtranspose_q copies and transposes a matrix dA to matrix dAT. Same as dtranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT DOUBLE_PRECISION array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_q( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB ); hipLaunchKernelGGL(( dtranspose_kernel), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_dtranspose_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat ) { magmablas_dtranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- dtranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as dtranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched_q( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB, batchCount ); hipLaunchKernelGGL(( dtranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_dtranspose_batched_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_dtranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
07a10459a55dc5b48723665d6c78584fa8721916.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void dtranspose_device( int m, int n, const double *A, int lda, double *AT, int ldat) { __shared__ double sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void dtranspose_kernel( int m, int n, const double *A, int lda, double *AT, int ldat) { dtranspose_device(m, n, A, lda, AT, ldat); } __global__ void dtranspose_kernel_batched( int m, int n, double **dA_array, int lda, double **dAT_array, int ldat) { int batchid = blockIdx.z; dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- dtranspose_q copies and transposes a matrix dA to matrix dAT. Same as dtranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT DOUBLE_PRECISION array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_q( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB ); dtranspose_kernel<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_dtranspose_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat ) { magmablas_dtranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- dtranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as dtranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched_q( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB, batchCount ); dtranspose_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_dtranspose_batched_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_dtranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
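The dtranspose_device routine above stages sub-tiles through shared memory declared as sA[NB][NX+1]; the extra column keeps the transposed accesses free of shared-memory bank conflicts. The standalone sketch below shows only that textbook tile-and-pad pattern for one row-major matrix; it has none of MAGMA's column-major layout, sub-tiling, queue, or batching machinery, and every name in it is illustrative.

#include <cstdio>
#include <cuda_runtime.h>

#define TILE 32
#define ROWS 8

__global__ void transpose32(const double *A, int lda, double *AT, int ldat,
                            int m, int n) {
  __shared__ double tile[TILE][TILE + 1];   // +1 column avoids bank conflicts

  int x = blockIdx.x * TILE + threadIdx.x;  // column of A
  int y = blockIdx.y * TILE + threadIdx.y;  // row of A
  for (int j = 0; j < TILE; j += ROWS)
    if (x < n && y + j < m)
      tile[threadIdx.y + j][threadIdx.x] = A[(y + j) * lda + x];
  __syncthreads();

  x = blockIdx.y * TILE + threadIdx.x;      // column of AT (= row of A)
  y = blockIdx.x * TILE + threadIdx.y;      // row of AT (= column of A)
  for (int j = 0; j < TILE; j += ROWS)
    if (x < m && y + j < n)
      AT[(y + j) * ldat + x] = tile[threadIdx.x][threadIdx.y + j];
}

int main() {
  const int m = 100, n = 80;                // A is m x n, row-major here
  double *A, *AT;
  cudaMallocManaged(&A, m * n * sizeof(double));
  cudaMallocManaged(&AT, m * n * sizeof(double));
  for (int i = 0; i < m * n; ++i) A[i] = i;

  dim3 threads(TILE, ROWS);
  dim3 grid((n + TILE - 1) / TILE, (m + TILE - 1) / TILE);
  transpose32<<<grid, threads>>>(A, n, AT, m, m, n);
  cudaDeviceSynchronize();

  // A[1][2] and AT[2][1] should print the same value.
  std::printf("A[1][2]=%g  AT[2][1]=%g\n", A[1 * n + 2], AT[2 * m + 1]);
  cudaFree(A);
  cudaFree(AT);
  return 0;
}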
117a3ff24b489e50b7f28a1591c7be6cdad196f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "layers/operator.h" #include <string.h> // -------------------------------------------------------------------------- // kernel code // scale_const_{gpu, cpu} // -------------------------------------------------------------------------- // element-wise addition // top[i] = top[i] + bottom[i] #ifdef GPU __global__ static void eltwise_add_gpu(const real bottom[], real top[], const long int data_size) { const long int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < data_size) { top[index] += bottom[index]; } } #else static void eltwise_add_cpu(const real bottom[], real top[], const long int data_size) { for (long int index = 0; index < data_size; ++index) { top[index] += bottom[index]; } } #endif // -------------------------------------------------------------------------- // layer-wise operator code // eltwise_sum_forward // -------------------------------------------------------------------------- // element-wise sum: top = bottoms[0] + ... + bottoms[num_bottoms-1] static void eltwise_forward(const Tensor* const bottoms[], Tensor* const top, const int num_bottoms) { if (num_bottoms > 0) { const int data_size = get_data_size(bottoms[0]); #ifdef GPU { const int threads_per_block = 512; const int num_blocks = DIV_THEN_CEIL(data_size, threads_per_block); hipMemcpyAsync(top->data, bottoms[0]->data, data_size * sizeof(real), hipMemcpyDeviceToDevice); for (int m = 1; m < num_bottoms; ++m) { hipLaunchKernelGGL(( eltwise_add_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0, bottoms[m]->data, top->data, data_size); } } #else { memcpy(top->data, bottoms[0]->data, data_size * sizeof(real)); for (int m = 1; m < num_bottoms; ++m) { eltwise_add_cpu(bottoms[m]->data, top->data, data_size); } } #endif } } // -------------------------------------------------------------------------- // output shape calculator code // -------------------------------------------------------------------------- static void eltwise_shape(const Tensor* const bottoms[], Tensor* const top, const int num_bottoms) { const Tensor* const p_bottom = (num_bottoms > 0) ? bottoms[0] : NULL; // top shape = bottom shape if (p_bottom) { top->ndim = p_bottom->ndim; top->num_items = p_bottom->num_items; for (int n = 0; n < p_bottom->num_items; ++n) { for (int i = 0; i < p_bottom->ndim; ++i) { top->shape[n][i] = p_bottom->shape[n][i]; } } for (int n = 0; n < p_bottom->num_items; ++n) { top->start[n] = p_bottom->start[n]; } } } // -------------------------------------------------------------------------- // functions for layer instance // -------------------------------------------------------------------------- void forward_eltwise_layer(void* const net_, void* const layer_) { Layer* const layer = (Layer*)layer_; eltwise_forward(layer->p_bottoms, get_top(layer, 0), layer->num_bottoms); } void shape_eltwise_layer(void* const net_, void* const layer_) { Layer* const layer = (Layer*)layer_; eltwise_shape(layer->p_bottoms, get_top(layer, 0), layer->num_bottoms); } void init_eltwise_layer(void* const net_, void* const layer_) { return; } void free_eltwise_layer(void* const net_, void* const layer_) { return; }
117a3ff24b489e50b7f28a1591c7be6cdad196f5.cu
#include "layers/operator.h" #include <string.h> // -------------------------------------------------------------------------- // kernel code // scale_const_{gpu, cpu} // -------------------------------------------------------------------------- // element-wise addition // top[i] = top[i] + bottom[i] #ifdef GPU __global__ static void eltwise_add_gpu(const real bottom[], real top[], const long int data_size) { const long int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < data_size) { top[index] += bottom[index]; } } #else static void eltwise_add_cpu(const real bottom[], real top[], const long int data_size) { for (long int index = 0; index < data_size; ++index) { top[index] += bottom[index]; } } #endif // -------------------------------------------------------------------------- // layer-wise operator code // eltwise_sum_forward // -------------------------------------------------------------------------- // element-wise sum: top = bottoms[0] + ... + bottoms[num_bottoms-1] static void eltwise_forward(const Tensor* const bottoms[], Tensor* const top, const int num_bottoms) { if (num_bottoms > 0) { const int data_size = get_data_size(bottoms[0]); #ifdef GPU { const int threads_per_block = 512; const int num_blocks = DIV_THEN_CEIL(data_size, threads_per_block); cudaMemcpyAsync(top->data, bottoms[0]->data, data_size * sizeof(real), cudaMemcpyDeviceToDevice); for (int m = 1; m < num_bottoms; ++m) { eltwise_add_gpu<<<num_blocks, threads_per_block>>>( bottoms[m]->data, top->data, data_size); } } #else { memcpy(top->data, bottoms[0]->data, data_size * sizeof(real)); for (int m = 1; m < num_bottoms; ++m) { eltwise_add_cpu(bottoms[m]->data, top->data, data_size); } } #endif } } // -------------------------------------------------------------------------- // output shape calculator code // -------------------------------------------------------------------------- static void eltwise_shape(const Tensor* const bottoms[], Tensor* const top, const int num_bottoms) { const Tensor* const p_bottom = (num_bottoms > 0) ? bottoms[0] : NULL; // top shape = bottom shape if (p_bottom) { top->ndim = p_bottom->ndim; top->num_items = p_bottom->num_items; for (int n = 0; n < p_bottom->num_items; ++n) { for (int i = 0; i < p_bottom->ndim; ++i) { top->shape[n][i] = p_bottom->shape[n][i]; } } for (int n = 0; n < p_bottom->num_items; ++n) { top->start[n] = p_bottom->start[n]; } } } // -------------------------------------------------------------------------- // functions for layer instance // -------------------------------------------------------------------------- void forward_eltwise_layer(void* const net_, void* const layer_) { Layer* const layer = (Layer*)layer_; eltwise_forward(layer->p_bottoms, get_top(layer, 0), layer->num_bottoms); } void shape_eltwise_layer(void* const net_, void* const layer_) { Layer* const layer = (Layer*)layer_; eltwise_shape(layer->p_bottoms, get_top(layer, 0), layer->num_bottoms); } void init_eltwise_layer(void* const net_, void* const layer_) { return; } void free_eltwise_layer(void* const net_, void* const layer_) { return; }
34e6589f6d72041661951ae1f52bf8246c9385f0.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file adaptive_avg_pooling.cu * \brief adaptive average pooling operator * \author Hang Zhang */ #include <hip/hip_runtime_api.h> #include <algorithm> #include "adaptive_avg_pooling-inl.h" #define START_IND(a, b, c) static_cast<int>(floor(static_cast<float>(a * c) / b)) #define END_IND(a, b, c) static_cast<int>(ceil(static_cast<float>((a + 1) * c) / b)) #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit namespace mxnet { namespace op { using namespace mshadow; template<typename In, typename Out> struct ScalarConvert { static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } }; /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output */ template <typename T> __global__ void adaptiveaveragepool(T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators on output pixels int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling over corresponding input pixels T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; T sum = ScalarConvert<int, T>::to(0); int ih, iw; for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { T val = ptr_input[iw*istrideW]; sum += val; } ptr_input += istrideH; // next input line } // Update output *ptr_output = sum / kH / kW; } } } /* * Description: * this function computes the gradInput from gradOutput * (uses atomic add) */ template <typename T> __global__ void atomicadaptiveaveragegradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on output indices int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; gradOutput = gradOutput + o_plane*osizeW*osizeH; gradInput = gradInput + i_plane*isizeW*isizeH; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // For all output pixels... for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients for over corresponding input pixels T *ptr_gradInput = gradInput + istartH*isizeW + istartW; T *ptr_gradOutput = gradOutput + oh*osizeW + ow; T grad_delta = *ptr_gradOutput / kW / kH; int ih, iw; for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { // atomic add since different threads could update same variable atomicAdd(&(ptr_gradInput[iw]), grad_delta); } ptr_gradInput += isizeW; // next input line } } } } template<typename xpu, typename DType, typename AccReal> void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s, const std::vector<TBlob> &input, const std::vector<TBlob> &output) { Tensor<xpu, 4, DType> itensor = input[0].get<xpu, 4, DType>(s); Tensor<xpu, 4, DType> otensor = output[0].get<xpu, 4, DType>(s); DType *input_data = itensor.dptr_; DType *output_data = otensor.dptr_; int64_t sizeB = itensor.size(0); int64_t sizeD = itensor.size(1); int64_t isizeH = itensor.size(2); int64_t isizeW = itensor.size(3); int64_t istrideD = get_stride<xpu, 4, DType>(itensor, 1); int64_t istrideH = get_stride<xpu, 4, DType>(itensor, 2); int64_t istrideW = get_stride<xpu, 4, DType>(itensor, 3); int64_t osizeH = otensor.size(2); int64_t osizeW = otensor.size(3); // cuda blocks & threads: int blocksH = max(static_cast<int>(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); // run averagepool kernel hipLaunchKernelGGL(( adaptiveaveragepool) , dim3(blocks), dim3(threads), 0, stream, input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); MSHADOW_CUDA_POST_KERNEL_CHECK(AdaptiveAvgPoolUpdateOutput); } template<typename xpu, typename DType, typename AccReal> void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s, const std::vector<TBlob> 
&input, const std::vector<TBlob> &output) { Tensor<xpu, 4, DType> gradOut = input[0].get<xpu, 4, DType>(s); Tensor<xpu, 4, DType> gradIn = output[0].get<xpu, 4, DType>(s); DType *gradOutput_data = gradOut.dptr_; DType *gradInput_data = gradIn.dptr_; int64_t sizeB = gradIn.size(0); int64_t sizeD = gradIn.size(1); int64_t isizeH = gradIn.size(2); int64_t isizeW = gradIn.size(3); int64_t osizeH = gradOut.size(2); int64_t osizeW = gradOut.size(3); // cuda blocks & threads: int blocksH = max(static_cast<int>(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptiveaveragegradinput) , dim3(blocks), dim3(threads), 0, stream, gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); MSHADOW_CUDA_POST_KERNEL_CHECK(AdaptiveAvgPoolUpdateGradInput); } NNVM_REGISTER_OP(_contrib_AdaptiveAvgPooling2D) .set_attr<FCompute>("FCompute<gpu>", AdaptiveAvgPoolOpForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_AdaptiveAvgPooling2D) .set_attr<FCompute>("FCompute<gpu>", AdaptiveAvgPoolOpBackward<gpu>); } // namespace op } // namespace mxnet
34e6589f6d72041661951ae1f52bf8246c9385f0.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file adaptive_avg_pooling.cu * \brief adaptive average pooling operator * \author Hang Zhang */ #include <cuda_runtime_api.h> #include <algorithm> #include "adaptive_avg_pooling-inl.h" #define START_IND(a, b, c) static_cast<int>(floor(static_cast<float>(a * c) / b)) #define END_IND(a, b, c) static_cast<int>(ceil(static_cast<float>((a + 1) * c) / b)) #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit namespace mxnet { namespace op { using namespace mshadow; template<typename In, typename Out> struct ScalarConvert { static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } }; /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output */ template <typename T> __global__ void adaptiveaveragepool(T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators on output pixels int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling over corresponding input pixels T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; T sum = ScalarConvert<int, T>::to(0); int ih, iw; for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { T val = ptr_input[iw*istrideW]; sum += val; } ptr_input += istrideH; // next input line } // Update output *ptr_output = sum / kH / kW; } } } /* * Description: * this function computes the gradInput from gradOutput * (uses atomic add) */ template <typename T> __global__ void atomicadaptiveaveragegradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on output indices int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; gradOutput = gradOutput + o_plane*osizeW*osizeH; gradInput = gradInput + i_plane*isizeW*isizeH; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // For all output pixels... for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients for over corresponding input pixels T *ptr_gradInput = gradInput + istartH*isizeW + istartW; T *ptr_gradOutput = gradOutput + oh*osizeW + ow; T grad_delta = *ptr_gradOutput / kW / kH; int ih, iw; for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { // atomic add since different threads could update same variable atomicAdd(&(ptr_gradInput[iw]), grad_delta); } ptr_gradInput += isizeW; // next input line } } } } template<typename xpu, typename DType, typename AccReal> void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<gpu> *s, const std::vector<TBlob> &input, const std::vector<TBlob> &output) { Tensor<xpu, 4, DType> itensor = input[0].get<xpu, 4, DType>(s); Tensor<xpu, 4, DType> otensor = output[0].get<xpu, 4, DType>(s); DType *input_data = itensor.dptr_; DType *output_data = otensor.dptr_; int64_t sizeB = itensor.size(0); int64_t sizeD = itensor.size(1); int64_t isizeH = itensor.size(2); int64_t isizeW = itensor.size(3); int64_t istrideD = get_stride<xpu, 4, DType>(itensor, 1); int64_t istrideH = get_stride<xpu, 4, DType>(itensor, 2); int64_t istrideW = get_stride<xpu, 4, DType>(itensor, 3); int64_t osizeH = otensor.size(2); int64_t osizeW = otensor.size(3); // cuda blocks & threads: int blocksH = max(static_cast<int>(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); // run averagepool kernel adaptiveaveragepool <<<blocks, threads, 0, stream>>> ( input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); MSHADOW_CUDA_POST_KERNEL_CHECK(AdaptiveAvgPoolUpdateOutput); } template<typename xpu, typename DType, typename AccReal> void AdaptiveAvgPoolUpdateGradInput(mshadow::Stream<gpu> *s, const std::vector<TBlob> &input, const std::vector<TBlob> 
&output) { Tensor<xpu, 4, DType> gradOut = input[0].get<xpu, 4, DType>(s); Tensor<xpu, 4, DType> gradIn = output[0].get<xpu, 4, DType>(s); DType *gradOutput_data = gradOut.dptr_; DType *gradInput_data = gradIn.dptr_; int64_t sizeB = gradIn.size(0); int64_t sizeD = gradIn.size(1); int64_t isizeH = gradIn.size(2); int64_t isizeW = gradIn.size(3); int64_t osizeH = gradOut.size(2); int64_t osizeW = gradOut.size(3); // cuda blocks & threads: int blocksH = max(static_cast<int>(16L / sizeD), 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); // run updateGradInput kernel, accumulate gradients atomically atomicadaptiveaveragegradinput <<<blocks, threads, 0, stream>>> ( gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); MSHADOW_CUDA_POST_KERNEL_CHECK(AdaptiveAvgPoolUpdateGradInput); } NNVM_REGISTER_OP(_contrib_AdaptiveAvgPooling2D) .set_attr<FCompute>("FCompute<gpu>", AdaptiveAvgPoolOpForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_AdaptiveAvgPooling2D) .set_attr<FCompute>("FCompute<gpu>", AdaptiveAvgPoolOpBackward<gpu>); } // namespace op } // namespace mxnet
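An illustrative aside to the adaptive-pooling pair above (not one of the dataset files): the window each output cell averages over comes from the START_IND/END_IND macros, and the arithmetic is easier to see on a concrete axis. A minimal host-only sketch, assuming made-up sizes and only the C++ standard library:

#include <cstdio>
#include <cmath>

// Mirrors START_IND(a, b, c) and END_IND(a, b, c) from the files above.
static int start_ind(int o, int osize, int isize) {
  return static_cast<int>(std::floor(static_cast<float>(o * isize) / osize));
}
static int end_ind(int o, int osize, int isize) {
  return static_cast<int>(std::ceil(static_cast<float>((o + 1) * isize) / osize));
}

int main() {
  const int isize = 5, osize = 3;  // hypothetical input/output axis sizes
  for (int o = 0; o < osize; ++o) {
    const int s = start_ind(o, osize, isize), e = end_ind(o, osize, isize);
    std::printf("output %d averages input [%d, %d), kernel size %d\n", o, s, e, e - s);
  }
  return 0;  // prints windows [0,2), [1,4), [3,5): adjacent adaptive windows may overlap
}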
386ef45adbe9b5cccb4f74f0efc98659f5dda34a.hip
// !!! This is a file automatically generated by hipify!!! #undef __SSE2__ #include <stdio.h> #include <time.h> #include <cv.h> #include <highgui.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <math_functions.h> #include <map> #include "ant.h" #include "cutil/cutil.h" //#include "cutil_inline.h" using namespace std; #define ANTS 1024 void showImage(IplImage *img){ cvNamedWindow("Slika", CV_WINDOW_AUTOSIZE); cvMoveWindow("Slika", 440, 65); cvShowImage("Slika", img); while(true) if (cvWaitKey(10) == 27) break; cvDestroyWindow("Slika"); } __device__ position d_imageMatrix[512][512]; __device__ float alpha = 4; __device__ float beta = 2; __device__ float sum[1024]; __device__ float getMax(float vals[4]){ float max = vals[0]; for (int i = 1; i < 4; ++i){ max = max > vals[i] ? max : vals[i]; } return max; } __device__ float myRand(unsigned long seed){ unsigned long next = seed * 1103515245 + 12345; unsigned long temp = ((unsigned)(next/65536) % 32768); return (float)temp/32768; } __global__ void setImageValues(float *img, size_t pitch, float maxValue){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float *q = (float *)((char *)img + j * pitch) + i; *q = (*q) / maxValue; d_imageMatrix[i][j].val = *q; d_imageMatrix[i][j].antCount = 0; d_imageMatrix[i][j].tao = 0.001; } __global__ void setNeighs(int height, int width){ float tl, tm, tr; float ml, mr; float bl, bm, br; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float intens[4]; tl = (i - 1 >= 0 && j - 1 >= 0) ? d_imageMatrix[i-1][j-1].val : d_imageMatrix[i][j].val; br = (i + 1 <= height && j + 1 <= width) ? d_imageMatrix[i+1][j+1].val : d_imageMatrix[i][j].val; tr = (i - 1 >= 0 && j + 1 <= width) ? d_imageMatrix[i-1][j+1].val : d_imageMatrix[i][j].val; bl = (i + 1 <= height && j - 1 >= 0) ? d_imageMatrix[i+1][j-1].val : d_imageMatrix[i][j].val; tm = (i - 1 >= 0) ? d_imageMatrix[i-1][j].val : d_imageMatrix[i][j].val; bm = (i + 1 < height) ? d_imageMatrix[i+1][j].val : d_imageMatrix[i][j].val; ml = (j - 1 >= 0) ? d_imageMatrix[i][j-1].val : d_imageMatrix[i][j].val; mr = (j + 1 < width) ? 
d_imageMatrix[i][j+1].val : d_imageMatrix[i][j].val; intens[0] = fabs(tl - br); intens[1] = fabs(tr - bl); intens[2] = fabs(ml - mr); intens[3] = fabs(tm - bm); d_imageMatrix[i][j].ni = d_imageMatrix[i][j].val * getMax(intens); int index = 0; if (i - 1 >= 0 && j - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i-1][j-1]; if (i + 1 < height && j + 1 < width) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i+1][j+1]; if (i - 1 >= 0 && j + 1 < width) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i-1][j+1]; if (i + 1 < height && j - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i+1][j-1]; if (i - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i-1][j]; if (i + 1 < height) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i+1][j]; if (j - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i][j-1]; if (j + 1 < width) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i][j+1]; d_imageMatrix[i][j].neighCount = index; } __global__ void setAnts(ant ants[ANTS]){ int i = blockIdx.x * blockDim.x + threadIdx.x; int x = ants[i].startx; int y = ants[i].starty; ants[i].push_back(&d_imageMatrix[x][y]); } __global__ void walk(ant ants[ANTS], unsigned long seed){ int antIndex = blockIdx.x * blockDim.x + threadIdx.x; if (antIndex >= ANTS) return; int admissibleCount = 0; position *admissible[8]; position *last = ants[antIndex].path.last(); double probabilities[8]; double probSum = 0; if (ants[antIndex].path.getCount() == 1){ for (int i = 0; i < last->neighCount; ++i){ admissible[i] = last->neigh[i]; ++admissibleCount; } for (int i = 0; i < admissibleCount; ++i){ position *tmp = last->neigh[i]; double probability = powf(tmp->tao, alpha) * powf(tmp->ni, beta); probabilities[i] = probability; probSum += probability; } } else if (ants[antIndex].path.getCount() > 1){ position *penultimate = ants[antIndex].path.penultimate(); for (int neighbors = 0; neighbors < last->neighCount; ++neighbors){ if (ants[antIndex].path.contains(last->neigh[neighbors]) || (last->neigh[neighbors] == penultimate)) continue; admissible[admissibleCount++] = last->neigh[neighbors]; } --admissibleCount; for (int i = 0; i < admissibleCount; ++i){ position *tmp = admissible[i]; double probability = powf(tmp->tao, alpha) * powf(tmp->ni, beta); probabilities[i] = probability; probSum += probability; } } double r = myRand(antIndex + seed) * probSum; double acumulatedSum = 0; position *next = 0; for (int i = 0; i < admissibleCount; ++i){ acumulatedSum += probabilities[i]; if (r < acumulatedSum) next = last->neigh[i]; } if (!next){ if (admissibleCount) next = admissible[admissibleCount]; else { int index = (int)myRand(seed << 3) * 32786 % ants[antIndex].path.size(); next = ants[antIndex].path[index]; } //next = ants[antIndex].path[0]; } atomicAdd(&next->antCount, 1); ants[antIndex].push_back(next); } __global__ void updateTrails(){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0; if (d_imageMatrix[i][j].ni >= 0.08) { sum = d_imageMatrix[i][j].ni * d_imageMatrix[i][j].antCount; } d_imageMatrix[i][j].tao = d_imageMatrix[i][j].tao * (1 - 0.04) + sum; d_imageMatrix[i][j].antCount = 0; } int main(int argc, char *argv[]){ IplImage *in = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE); int height = in->height; int width = in->width; // showImage(in); float maxValue = 0; /* float *hostImageValues = (float *)malloc(height * width * sizeof(float)); for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ *(hostImageValues + i * 
width + j) = ((uchar *)(in->imageData + i*in->widthStep))[j]; maxValue = maxValue > *(hostImageValues + i * width + j) ? maxValue : *(hostImageValues + i * width + j); } } */ float hostImageValues[512][512]; for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ hostImageValues[i][j] = cvGet2D(in, i, j).val[0]; maxValue = maxValue > hostImageValues[i][j] ? maxValue : hostImageValues[i][j]; } } float *imageIntensityValues; size_t pitch; hipMallocPitch((void **)&imageIntensityValues, &pitch, sizeof(float) * width, height); hipMemcpy(imageIntensityValues, hostImageValues, sizeof(float) * width * height, hipMemcpyHostToDevice); dim3 threadsPerBlock(16, 16); dim3 numBlocks(512 / threadsPerBlock.x, 512 / threadsPerBlock.y); hipLaunchKernelGGL(( setImageValues), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, imageIntensityValues, pitch, maxValue); hipLaunchKernelGGL(( setNeighs), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, height, width); map<pair<int, int>, bool> mapa; ant mravi[ANTS]; srand((unsigned)time(NULL)); int k = 0; while(k < ANTS){ int i = rand() % height; int j = rand() % width; pair<int, int> lokacija (i, j); if (mapa.find(lokacija) != mapa.end()) continue; mapa[lokacija] = true; ++k; } map<pair<int, int>, bool>::iterator it; int index = 0; for (it = mapa.begin(); it != mapa.end(); ++it){ pair<int, int> p = it->first; int x = p.first; int y = p.second; int len = rand() % 15 + 25 + 1; mravi[index] = ant(len); mravi[index++].setStartPosition(x, y); } ant *deviceAnts; hipMalloc((void **)&deviceAnts, ANTS * sizeof(ant)); hipMemcpy(deviceAnts, mravi, ANTS * sizeof(ant), hipMemcpyHostToDevice); hipLaunchKernelGGL(( setAnts), dim3(32), dim3(32), 0, 0, deviceAnts); int iter = atoi(argv[2]); for (int i = 0; i < iter; ++i){ hipLaunchKernelGGL(( walk), dim3(32), dim3(32), 0, 0, deviceAnts, (unsigned)time(NULL)); hipLaunchKernelGGL(( updateTrails), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, ); } position *slika = (position *)malloc(512 * 512 * sizeof(position)); hipMemcpyFromSymbol(slika, "d_imageMatrix", 512 * 512 * sizeof(position), 0, hipMemcpyDeviceToHost); IplImage *out = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1); float total = 0; for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ total = total + slika[i * width + j].tao; } } printf("%f\n", total); total /= (width * height); for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ if (slika[i * width + j].tao >= total) cvSet2D(out, j, i, cvScalar(255,0,0,0)); } } hipFree(imageIntensityValues); hipFree(deviceAnts); //free(hostImageValues); showImage(out); cvReleaseImage(&out); cvReleaseImage(&in); free(slika); return 0; }
386ef45adbe9b5cccb4f74f0efc98659f5dda34a.cu
#undef __SSE2__ #include <stdio.h> #include <time.h> #include <cv.h> #include <highgui.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <math_functions.h> #include <map> #include "ant.h" #include "cutil/cutil.h" //#include "cutil_inline.h" using namespace std; #define ANTS 1024 void showImage(IplImage *img){ cvNamedWindow("Slika", CV_WINDOW_AUTOSIZE); cvMoveWindow("Slika", 440, 65); cvShowImage("Slika", img); while(true) if (cvWaitKey(10) == 27) break; cvDestroyWindow("Slika"); } __device__ position d_imageMatrix[512][512]; __device__ float alpha = 4; __device__ float beta = 2; __device__ float sum[1024]; __device__ float getMax(float vals[4]){ float max = vals[0]; for (int i = 1; i < 4; ++i){ max = max > vals[i] ? max : vals[i]; } return max; } __device__ float myRand(unsigned long seed){ unsigned long next = seed * 1103515245 + 12345; unsigned long temp = ((unsigned)(next/65536) % 32768); return (float)temp/32768; } __global__ void setImageValues(float *img, size_t pitch, float maxValue){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float *q = (float *)((char *)img + j * pitch) + i; *q = (*q) / maxValue; d_imageMatrix[i][j].val = *q; d_imageMatrix[i][j].antCount = 0; d_imageMatrix[i][j].tao = 0.001; } __global__ void setNeighs(int height, int width){ float tl, tm, tr; float ml, mr; float bl, bm, br; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float intens[4]; tl = (i - 1 >= 0 && j - 1 >= 0) ? d_imageMatrix[i-1][j-1].val : d_imageMatrix[i][j].val; br = (i + 1 <= height && j + 1 <= width) ? d_imageMatrix[i+1][j+1].val : d_imageMatrix[i][j].val; tr = (i - 1 >= 0 && j + 1 <= width) ? d_imageMatrix[i-1][j+1].val : d_imageMatrix[i][j].val; bl = (i + 1 <= height && j - 1 >= 0) ? d_imageMatrix[i+1][j-1].val : d_imageMatrix[i][j].val; tm = (i - 1 >= 0) ? d_imageMatrix[i-1][j].val : d_imageMatrix[i][j].val; bm = (i + 1 < height) ? d_imageMatrix[i+1][j].val : d_imageMatrix[i][j].val; ml = (j - 1 >= 0) ? d_imageMatrix[i][j-1].val : d_imageMatrix[i][j].val; mr = (j + 1 < width) ? 
d_imageMatrix[i][j+1].val : d_imageMatrix[i][j].val; intens[0] = fabs(tl - br); intens[1] = fabs(tr - bl); intens[2] = fabs(ml - mr); intens[3] = fabs(tm - bm); d_imageMatrix[i][j].ni = d_imageMatrix[i][j].val * getMax(intens); int index = 0; if (i - 1 >= 0 && j - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i-1][j-1]; if (i + 1 < height && j + 1 < width) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i+1][j+1]; if (i - 1 >= 0 && j + 1 < width) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i-1][j+1]; if (i + 1 < height && j - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i+1][j-1]; if (i - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i-1][j]; if (i + 1 < height) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i+1][j]; if (j - 1 >= 0) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i][j-1]; if (j + 1 < width) d_imageMatrix[i][j].neigh[index++] = &d_imageMatrix[i][j+1]; d_imageMatrix[i][j].neighCount = index; } __global__ void setAnts(ant ants[ANTS]){ int i = blockIdx.x * blockDim.x + threadIdx.x; int x = ants[i].startx; int y = ants[i].starty; ants[i].push_back(&d_imageMatrix[x][y]); } __global__ void walk(ant ants[ANTS], unsigned long seed){ int antIndex = blockIdx.x * blockDim.x + threadIdx.x; if (antIndex >= ANTS) return; int admissibleCount = 0; position *admissible[8]; position *last = ants[antIndex].path.last(); double probabilities[8]; double probSum = 0; if (ants[antIndex].path.getCount() == 1){ for (int i = 0; i < last->neighCount; ++i){ admissible[i] = last->neigh[i]; ++admissibleCount; } for (int i = 0; i < admissibleCount; ++i){ position *tmp = last->neigh[i]; double probability = powf(tmp->tao, alpha) * powf(tmp->ni, beta); probabilities[i] = probability; probSum += probability; } } else if (ants[antIndex].path.getCount() > 1){ position *penultimate = ants[antIndex].path.penultimate(); for (int neighbors = 0; neighbors < last->neighCount; ++neighbors){ if (ants[antIndex].path.contains(last->neigh[neighbors]) || (last->neigh[neighbors] == penultimate)) continue; admissible[admissibleCount++] = last->neigh[neighbors]; } --admissibleCount; for (int i = 0; i < admissibleCount; ++i){ position *tmp = admissible[i]; double probability = powf(tmp->tao, alpha) * powf(tmp->ni, beta); probabilities[i] = probability; probSum += probability; } } double r = myRand(antIndex + seed) * probSum; double acumulatedSum = 0; position *next = 0; for (int i = 0; i < admissibleCount; ++i){ acumulatedSum += probabilities[i]; if (r < acumulatedSum) next = last->neigh[i]; } if (!next){ if (admissibleCount) next = admissible[admissibleCount]; else { int index = (int)myRand(seed << 3) * 32786 % ants[antIndex].path.size(); next = ants[antIndex].path[index]; } //next = ants[antIndex].path[0]; } atomicAdd(&next->antCount, 1); ants[antIndex].push_back(next); } __global__ void updateTrails(){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0; if (d_imageMatrix[i][j].ni >= 0.08) { sum = d_imageMatrix[i][j].ni * d_imageMatrix[i][j].antCount; } d_imageMatrix[i][j].tao = d_imageMatrix[i][j].tao * (1 - 0.04) + sum; d_imageMatrix[i][j].antCount = 0; } int main(int argc, char *argv[]){ IplImage *in = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE); int height = in->height; int width = in->width; // showImage(in); float maxValue = 0; /* float *hostImageValues = (float *)malloc(height * width * sizeof(float)); for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ *(hostImageValues + i * 
width + j) = ((uchar *)(in->imageData + i*in->widthStep))[j]; maxValue = maxValue > *(hostImageValues + i * width + j) ? maxValue : *(hostImageValues + i * width + j); } } */ float hostImageValues[512][512]; for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ hostImageValues[i][j] = cvGet2D(in, i, j).val[0]; maxValue = maxValue > hostImageValues[i][j] ? maxValue : hostImageValues[i][j]; } } float *imageIntensityValues; size_t pitch; cudaMallocPitch((void **)&imageIntensityValues, &pitch, sizeof(float) * width, height); cudaMemcpy(imageIntensityValues, hostImageValues, sizeof(float) * width * height, cudaMemcpyHostToDevice); dim3 threadsPerBlock(16, 16); dim3 numBlocks(512 / threadsPerBlock.x, 512 / threadsPerBlock.y); setImageValues<<<numBlocks, threadsPerBlock>>>(imageIntensityValues, pitch, maxValue); setNeighs<<<numBlocks, threadsPerBlock>>>(height, width); map<pair<int, int>, bool> mapa; ant mravi[ANTS]; srand((unsigned)time(NULL)); int k = 0; while(k < ANTS){ int i = rand() % height; int j = rand() % width; pair<int, int> lokacija (i, j); if (mapa.find(lokacija) != mapa.end()) continue; mapa[lokacija] = true; ++k; } map<pair<int, int>, bool>::iterator it; int index = 0; for (it = mapa.begin(); it != mapa.end(); ++it){ pair<int, int> p = it->first; int x = p.first; int y = p.second; int len = rand() % 15 + 25 + 1; mravi[index] = ant(len); mravi[index++].setStartPosition(x, y); } ant *deviceAnts; cudaMalloc((void **)&deviceAnts, ANTS * sizeof(ant)); cudaMemcpy(deviceAnts, mravi, ANTS * sizeof(ant), cudaMemcpyHostToDevice); setAnts<<<32, 32>>>(deviceAnts); int iter = atoi(argv[2]); for (int i = 0; i < iter; ++i){ walk<<<32, 32>>>(deviceAnts, (unsigned)time(NULL)); updateTrails<<<numBlocks, threadsPerBlock>>>(); } position *slika = (position *)malloc(512 * 512 * sizeof(position)); cudaMemcpyFromSymbol(slika, "d_imageMatrix", 512 * 512 * sizeof(position), 0, cudaMemcpyDeviceToHost); IplImage *out = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1); float total = 0; for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ total = total + slika[i * width + j].tao; } } printf("%f\n", total); total /= (width * height); for (int i = 0; i < height; ++i){ for (int j = 0; j < width; ++j){ if (slika[i * width + j].tao >= total) cvSet2D(out, j, i, cvScalar(255,0,0,0)); } } cudaFree(imageIntensityValues); cudaFree(deviceAnts); //free(hostImageValues); showImage(out); cvReleaseImage(&out); cvReleaseImage(&in); free(slika); return 0; }
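An illustrative aside to the ant-colony pair above (not one of the dataset files): both versions copy the pheromone map back with {cuda,hip}MemcpyFromSymbol(slika, "d_imageMatrix", ...), passing the device symbol as a string literal. CUDA removed that string-name form in CUDA 5.0, and HIP expects the symbol itself, which hipify normally wraps in HIP_SYMBOL. A minimal sketch of the corrected call, reusing slika, position, and d_imageMatrix from the file above; the surrounding error handling is an assumption, not part of the original:

// HIP form, as hipify would usually emit it (HIP_SYMBOL comes from <hip/hip_runtime_api.h>):
hipError_t err = hipMemcpyFromSymbol(slika, HIP_SYMBOL(d_imageMatrix),
                                     512 * 512 * sizeof(position), 0,
                                     hipMemcpyDeviceToHost);
if (err != hipSuccess)
    fprintf(stderr, "hipMemcpyFromSymbol failed: %s\n", hipGetErrorString(err));

// Equivalent CUDA form for the .cu file: pass the symbol, not its name:
//   cudaMemcpyFromSymbol(slika, d_imageMatrix, 512 * 512 * sizeof(position), 0,
//                        cudaMemcpyDeviceToHost);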
4986716d58ec7888e857ef57d6f2e154b0f4cf3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! CanonicalAxis <T = int32, Device = CUDA> */ template <typename T> __global__ void _CanonicalAxis( const int count, const int dim, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<int, CUDAContext>( const int count, const int dim, int* y, CUDAContext* ctx) { _CanonicalAxis<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, dim, y); } /*! Gather <T = ?, Device = CUDA> */ template <typename T> __global__ void _Gather( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } /*! Gather <T = float32, Device = CUDA> */ template <> void Gather<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* x, float* y, CUDAContext* ctx) { _Gather<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } /*! Gather <T = int32, Device = CUDA> */ template <> void Gather<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* x, int* y, CUDAContext* ctx) { _Gather<int> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } /*! GatherGrad <T = ?, Device = CUDA> */ template <typename T> __global__ void _GatherGrad( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } /*! GatherGrad <T = float32, Device = CUDA> */ template <> void GatherGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* dy, float* dx, CUDAContext* ctx) { _GatherGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } /*! GatherGrad <T = int32, Device = CUDA> */ template <> void GatherGrad<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* dy, int* dx, CUDAContext* ctx) { _GatherGrad<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
4986716d58ec7888e857ef57d6f2e154b0f4cf3a.cu
#ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! CanonicalAxis <T = int32, Device = CUDA> */ template <typename T> __global__ void _CanonicalAxis( const int count, const int dim, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<int, CUDAContext>( const int count, const int dim, int* y, CUDAContext* ctx) { _CanonicalAxis<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, dim, y); } /*! Gather <T = ?, Device = CUDA> */ template <typename T> __global__ void _Gather( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } /*! Gather <T = float32, Device = CUDA> */ template <> void Gather<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* x, float* y, CUDAContext* ctx) { _Gather<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } /*! Gather <T = int32, Device = CUDA> */ template <> void Gather<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* x, int* y, CUDAContext* ctx) { _Gather<int> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } /*! GatherGrad <T = ?, Device = CUDA> */ template <typename T> __global__ void _GatherGrad( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } /*! GatherGrad <T = float32, Device = CUDA> */ template <> void GatherGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* dy, float* dx, CUDAContext* ctx) { _GatherGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } /*! GatherGrad <T = int32, Device = CUDA> */ template <> void GatherGrad<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* dy, int* dx, CUDAContext* ctx) { _GatherGrad<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
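An illustrative aside to the gather pair above (not one of the dataset files): _GatherGrad scatters dy into dx with atomicAdd because repeated entries in indices send several output elements to the same input element, so plain stores would race. A host-only sketch of the same index arithmetic with made-up sizes (nothing from the dragon framework is assumed):

#include <cstdio>
#include <vector>

int main() {
  const int outer_dim = 1, inner_dim = 2, x_slice_dim = 4, y_slice_dim = 3;
  const int indices[y_slice_dim] = {2, 0, 2};            // index 2 appears twice
  const int count = outer_dim * y_slice_dim * inner_dim;
  std::vector<float> dy(count, 1.0f);
  std::vector<float> dx(outer_dim * x_slice_dim * inner_dim, 0.0f);

  for (int idx = 0; idx < count; ++idx) {                // serial stand-in for CUDA_1D_KERNEL_LOOP
    const int outer_idx = idx / inner_dim / y_slice_dim;
    const int slice_idx = idx % inner_dim;
    const int y_idx_offset = (idx / inner_dim) % y_slice_dim;
    const int x_idx_offset = indices[y_idx_offset];
    const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx;
    dx[x_idx] += dy[idx];                                // on the GPU this is the atomicAdd
  }
  for (size_t i = 0; i < dx.size(); ++i) std::printf("dx[%zu] = %g\n", i, dx[i]);
  return 0;                                              // input slice 2 accumulates two contributions
}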
3e3d2012ff3efdc93dcbf9923566cfd9708a0d7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** \file impl_pipe_count_cuda.cu Function for invoking the pipe calculation on CUDA - for testing.*/ #include "model_pipe_sequential_functions_cuda.cuh" __global__ void FindSequentialQCudaKernel( double p_target , double p_work , double t_work , double den_sc , double co2 , double n2, double d_inner , double d_outer , double roughness_coeff , double hydraulic_efficiency_coeff , double t_env , double heat_exchange_coeff , double length_of_segment , int number_of_segments , double*t_dev , double*q_dev ) { FindSequentialQCuda( p_target , p_work , t_work , den_sc , co2 , n2, d_inner , d_outer , roughness_coeff , hydraulic_efficiency_coeff , t_env , heat_exchange_coeff , length_of_segment , number_of_segments , t_dev , q_dev ); } extern "C" void CountQOnDevice( double p_target , double p_work , double t_work , double den_sc , double co2 , double n2, double d_inner , double d_outer , double roughness_coeff , double hydraulic_efficiency_coeff , double t_env , double heat_exchange_coeff , double length_of_segment, int number_of_segments , double* t_out , double* q_out) { double *q_dev; double *t_dev; hipMalloc( (void**) &q_dev, sizeof(double) ); hipMalloc( (void**) &t_dev, sizeof(double) ); hipLaunchKernelGGL(( FindSequentialQCudaKernel), dim3(1), dim3(1), 0, 0, p_target , p_work , t_work , den_sc , co2 , n2, d_inner , d_outer , roughness_coeff , hydraulic_efficiency_coeff , t_env , heat_exchange_coeff , length_of_segment , number_of_segments , t_dev , q_dev ); hipMemcpy(q_out, q_dev, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(t_out, t_dev, sizeof(double), hipMemcpyDeviceToHost); hipFree(q_dev); hipFree(t_dev); }
3e3d2012ff3efdc93dcbf9923566cfd9708a0d7f.cu
/** \file impl_pipe_count_cuda.cu Function for invoking the pipe calculation on CUDA - for testing.*/ #include "model_pipe_sequential_functions_cuda.cuh" __global__ void FindSequentialQCudaKernel( double p_target , double p_work , double t_work , double den_sc , double co2 , double n2, double d_inner , double d_outer , double roughness_coeff , double hydraulic_efficiency_coeff , double t_env , double heat_exchange_coeff , double length_of_segment , int number_of_segments , double*t_dev , double*q_dev ) { FindSequentialQCuda( p_target , p_work , t_work , den_sc , co2 , n2, d_inner , d_outer , roughness_coeff , hydraulic_efficiency_coeff , t_env , heat_exchange_coeff , length_of_segment , number_of_segments , t_dev , q_dev ); } extern "C" void CountQOnDevice( double p_target , double p_work , double t_work , double den_sc , double co2 , double n2, double d_inner , double d_outer , double roughness_coeff , double hydraulic_efficiency_coeff , double t_env , double heat_exchange_coeff , double length_of_segment, int number_of_segments , double* t_out , double* q_out) { double *q_dev; double *t_dev; cudaMalloc( (void**) &q_dev, sizeof(double) ); cudaMalloc( (void**) &t_dev, sizeof(double) ); FindSequentialQCudaKernel<<<1, 1>>>( p_target , p_work , t_work , den_sc , co2 , n2, d_inner , d_outer , roughness_coeff , hydraulic_efficiency_coeff , t_env , heat_exchange_coeff , length_of_segment , number_of_segments , t_dev , q_dev ); cudaMemcpy(q_out, q_dev, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(t_out, t_dev, sizeof(double), cudaMemcpyDeviceToHost); cudaFree(q_dev); cudaFree(t_dev); }
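An illustrative aside to the pipe-calculation pair above (not one of the dataset files): neither version of CountQOnDevice checks the runtime's return codes, so a failed allocation, copy, or launch goes unnoticed. A hedged sketch of a check that could wrap each call; the HIP_CHECK helper is hypothetical and not defined anywhere in the file above:

// Hypothetical helper, shown only as a sketch.
#define HIP_CHECK(expr)                                                        \
    do {                                                                       \
        hipError_t e_ = (expr);                                                \
        if (e_ != hipSuccess) {                                                \
            fprintf(stderr, "%s failed: %s\n", #expr, hipGetErrorString(e_));  \
            return;                                                            \
        }                                                                      \
    } while (0)

// Possible usage inside the void CountQOnDevice wrapper:
//   HIP_CHECK(hipMalloc((void**)&q_dev, sizeof(double)));
//   HIP_CHECK(hipMalloc((void**)&t_dev, sizeof(double)));
//   ...kernel launch...
//   HIP_CHECK(hipGetLastError());  // catches launch-configuration errors
//   HIP_CHECK(hipMemcpy(q_out, q_dev, sizeof(double), hipMemcpyDeviceToHost));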
60610bfa9ef47bbcc8d4964dcc5df8e5c6459f1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/grid_sampler_op.h" #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { static __forceinline__ __device__ bool in_bounds(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } template <typename T> static __forceinline__ __device__ void atomic_add(T* data, int h, int w, int sH, int sW, int H, int W, T delta) { if (in_bounds(h, w, H, W)) { platform::CudaAtomicAdd(data + h * sH + w * sW, delta); } } template <typename T> static __forceinline__ __device__ T _unnormalize(T coord, int size, bool align_corners) { if (align_corners) { return ((coord + 1.f) / 2) * (size - 1); } else { return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T clip_indexes(T in, int max_value) { return min(static_cast<T>(max_value), max(in, static_cast<T>(0))); } template <typename T> static __forceinline__ __device__ T reflect_indexes(T in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<T>(0); } T min = static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = fabs(in - min); T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } template <typename T> static __forceinline__ __device__ T compute_positions(T coord, int size, PaddingMode padding_mode, bool align_corners) { coord = _unnormalize<T>(coord, size, align_corners); if (padding_mode == PaddingMode::border) { coord = clip_indexes(coord, size - 1); } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = reflect_indexes(coord, 0, 2 * (size - 1)); } else { coord = reflect_indexes(coord, -1, 2 * size - 1); } coord = clip_indexes(coord, size - 1); } return coord; } template <typename T> static __forceinline__ __device__ T _unnormalize_with_mask(T coord, int size, bool align_corners, T* grad_in) { if (align_corners) { *grad_in = static_cast<T>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { *grad_in = static_cast<T>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T clip_indexes_with_mask(T in, int clip_limit, T* grad_in) { if (in <= static_cast<T>(0)) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } else { T max = static_cast<T>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<T>(0); return max; } else { *grad_in = static_cast<T>(1); return in; } } } template <typename T> static __forceinline__ __device__ T reflect_indexes_with_mask(T in, int twice_low, int twice_high, T* grad_in) { if (twice_low == twice_high) { *grad_in 
= static_cast<T>(0); return static_cast<T>(0); } int grad_in_mult_; T min = static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = in - min; if (in < static_cast<T>(0)) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { *grad_in = static_cast<T>(grad_in_mult_); return extra + min; } else { *grad_in = static_cast<T>(-grad_in_mult_); return span - extra + min; } } template <typename T> static __forceinline__ __device__ T compute_positions_with_mask(T coord, int size, PaddingMode padding_mode, bool align_corners, T* grad_in) { T grad_clip, grad_refl; coord = _unnormalize_with_mask<T>(coord, size, align_corners, grad_in); if (padding_mode == PaddingMode::border) { coord = clip_indexes_with_mask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = reflect_indexes_with_mask(coord, 0, 2 * (size - 1), &grad_refl); } else { coord = reflect_indexes_with_mask(coord, -1, 2 * size - 1, &grad_refl); } coord = clip_indexes_with_mask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_refl * grad_clip; } return coord; } template <typename T> __global__ void grid_sample_cuda_kernel(const int nthreads, int n, int out_c, int out_h, int out_w, int in_h, int in_w, const T* input, const T* grid, T* output, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sN = out_c * in_h * in_w; int inp_sC = in_h * in_w; int inp_sH = in_w; int inp_sW = 1; int grid_sN = out_h * out_w * 2; int grid_sH = out_w * 2; int grid_sW = 2; int grid_sCoor = 1; int out_sN = out_c * out_h * out_w; int out_sC = out_h * out_w; int out_sH = out_w; int out_sW = 1; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int n = index / (out_h * out_w); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; ix = compute_positions(ix, in_w, padding_mode, align_corners); iy = compute_positions(iy, in_h, padding_mode, align_corners); if (mode == Mode::bilinear) { int ix_nw = static_cast<int>(floor(ix)); int iy_nw = static_cast<int>(floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; T nw = (ix_se - ix) * (iy_se - iy); T ne = (ix - ix_sw) * (iy_sw - iy); T sw = (ix_ne - ix) * (iy - iy_ne); T se = (ix - ix_nw) * (iy - iy_nw); auto inp_offset_NC = n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < out_c; ++c, inp_offset_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<T>(0); if (in_bounds(iy_nw, ix_nw, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (in_bounds(iy_ne, ix_ne, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (in_bounds(iy_sw, ix_sw, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (in_bounds(iy_se, ix_se, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (mode == Mode::nearest) { int ix_nearest = static_cast<int>(std::nearbyint(ix)); int iy_nearest = static_cast<int>(std::nearbyint(iy)); auto inp_offset_NC = n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 
0; c < out_c; ++c, inp_offset_NC += inp_sC, out_ptr_NCHW += out_sC) { if (in_bounds(iy_nearest, ix_nearest, in_h, in_w)) { *out_ptr_NCHW = input[inp_offset_NC + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<T>(0); } } } } } template <typename T> class GridSampleOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.cuda_device_context(); auto align_corners = ctx.Attr<bool>("align_corners"); auto padding_mode_s = ctx.Attr<std::string>("padding_mode"); auto mode_s = ctx.Attr<std::string>("mode"); PaddingMode padding_mode; Mode mode; if (padding_mode_s == "border") { padding_mode = PaddingMode::border; } else if (padding_mode_s == "reflection") { padding_mode = PaddingMode::reflect; } else { padding_mode = PaddingMode::zeros; } if (mode_s == "nearest") { mode = Mode::nearest; } else { mode = Mode::bilinear; } auto* input = ctx.Input<Tensor>("X"); auto* grid = ctx.Input<Tensor>("Grid"); const int n = grid->dims()[0]; const int out_h = grid->dims()[1]; const int out_w = grid->dims()[2]; const int c = input->dims()[1]; const int in_h = input->dims()[2]; const int in_w = input->dims()[3]; VLOG(3) << "n: " << n << "; c: " << c << "; out_h: " << out_h << "; out_w: " << out_w; auto* output = ctx.Output<Tensor>("Output"); auto* output_data = output->mutable_data<T>(ctx.GetPlace()); VLOG(3) << "out dims: " << output->dims()[0] << "; " << output->dims()[1] << "; " << output->dims()[2] << "; " << output->dims()[3]; math::SetConstant<paddle::platform::CUDADeviceContext, T>()( dev_ctx, output, static_cast<T>(0)); int count = static_cast<int>(n * out_h * out_w); auto cu_stream = dev_ctx.stream(); int block_size = 512; int grid_size = (count + block_size - 1) / block_size; VLOG(3) << "cuda launch - grid dims: " << grid_size << "; block dims" << block_size; hipLaunchKernelGGL(( grid_sample_cuda_kernel<T>), dim3(grid_size), dim3(block_size), 0, cu_stream, count, n, c, out_h, out_w, in_h, in_w, input->data<T>(), grid->data<T>(), output_data, mode, padding_mode, align_corners); } }; template <typename T> __global__ void grid_sampler_cuda_backward_kernel( const int nthreads, const T* grad_output, const T* input, const T* grid, int n, int out_c, int out_h, int out_w, int in_h, int in_w, T* grad_input, T* grad_grid, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sN = out_c * in_h * in_w; int inp_sC = in_h * in_w; int inp_sH = in_w; int inp_sW = 1; int grid_sN = out_h * out_w * 2; int grid_sH = out_w * 2; int grid_sW = 2; int grid_sCoor = 1; int gOut_sN = out_c * out_h * out_w; int gOut_sC = out_h * out_w; int gOut_sH = out_w; int gOut_sW = 1; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int n = index / (out_h * out_w); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T gix_mult, giy_mult; ix = compute_positions_with_mask(ix, in_w, padding_mode, align_corners, &gix_mult); iy = compute_positions_with_mask(iy, in_h, padding_mode, align_corners, &giy_mult); if (mode == Mode::bilinear) { int ix_nw = static_cast<int>(floor(ix)); int iy_nw = static_cast<int>(floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; T nw = (ix_se - ix) * (iy_se - iy); T ne = (ix - ix_sw) * (iy_sw - iy); T sw = (ix_ne - ix) * (iy - iy_ne); T se = (ix - ix_nw) 
* (iy - iy_nw); T gix = static_cast<T>(0), giy = static_cast<T>(0); int gOut_offset = n * gOut_sN + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; int inp_offset_NC = n * inp_sN; for (int c = 0; c < out_c; ++c, inp_offset_NC += inp_sC, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { T gOut = grad_output[gOut_offset]; atomic_add(gInp_ptr_NC, iy_nw, ix_nw, inp_sH, inp_sW, in_h, in_w, nw * gOut); atomic_add(gInp_ptr_NC, iy_ne, ix_ne, inp_sH, inp_sW, in_h, in_w, ne * gOut); atomic_add(gInp_ptr_NC, iy_sw, ix_sw, inp_sH, inp_sW, in_h, in_w, sw * gOut); atomic_add(gInp_ptr_NC, iy_se, ix_se, inp_sH, inp_sW, in_h, in_w, se * gOut); if (in_bounds(iy_nw, ix_nw, in_h, in_w)) { T nw_val = input[inp_offset_NC + iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (in_bounds(iy_ne, ix_ne, in_h, in_w)) { T ne_val = input[inp_offset_NC + iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (in_bounds(iy_sw, ix_sw, in_h, in_w)) { T sw_val = input[inp_offset_NC + iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (in_bounds(iy_se, ix_se, in_h, in_w)) { T se_val = input[inp_offset_NC + iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } if (grad_grid != nullptr) { T* gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } else if (mode == Mode::nearest) { int ix_nearest = static_cast<int>(std::nearbyint(ix)); int iy_nearest = static_cast<int>(std::nearbyint(iy)); int gOut_offset = n * gOut_sN + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { atomic_add(gInp_ptr_NC, iy_nearest, ix_nearest, inp_sH, inp_sW, in_h, in_w, grad_output[gOut_offset]); } if (grad_grid != nullptr) { T* gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = static_cast<T>(0); gGrid_ptr_NHW[1] = static_cast<T>(0); } } } } template <typename T> class GridSampleGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.cuda_device_context(); auto align_corners = ctx.Attr<bool>("align_corners"); auto padding_mode_s = ctx.Attr<std::string>("padding_mode"); auto mode_s = ctx.Attr<std::string>("mode"); PaddingMode padding_mode; Mode mode; if (padding_mode_s == "border") { padding_mode = PaddingMode::border; } else if (padding_mode_s == "reflection") { padding_mode = PaddingMode::reflect; } else { padding_mode = PaddingMode::zeros; } if (mode_s == "nearest") { mode = Mode::nearest; } else { mode = Mode::bilinear; } auto* input = ctx.Input<Tensor>("X"); auto* grid = ctx.Input<Tensor>("Grid"); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); const int n = grid->dims()[0]; const int out_h = grid->dims()[1]; const int out_w = grid->dims()[2]; const int c = input->dims()[1]; const int in_h = input->dims()[2]; const int in_w = input->dims()[3]; auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); input_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.template device_context<paddle::platform::CUDADeviceContext>(), input_grad, static_cast<T>(0)); T* grid_grad_data = nullptr; if (ctx.HasOutput(framework::GradVarName("Grid"))) { auto* grid_grad 
= ctx.Output<Tensor>(framework::GradVarName("Grid")); grid_grad_data = grid_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.template device_context<paddle::platform::CUDADeviceContext>(), grid_grad, static_cast<T>(0)); } int count = static_cast<int>(n * out_h * out_w); auto cu_stream = dev_ctx.stream(); int block_size = 512; int grid_size = (count + block_size - 1) / block_size; VLOG(3) << "cuda launch grad kernel - grid dims: " << grid_size << "; block dims" << block_size << "; count: " << count; hipLaunchKernelGGL(( grid_sampler_cuda_backward_kernel< T>), dim3(grid_size), dim3(block_size), 0, cu_stream, count, output_grad->data<T>(), input->data<T>(), grid->data<T>(), n, c, out_h, out_w, in_h, in_w, input_grad->data<T>(), grid_grad_data, mode, padding_mode, align_corners); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(grid_sampler, ops::GridSampleOpCUDAKernel<float>, ops::GridSampleOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(grid_sampler_grad, ops::GridSampleGradOpCUDAKernel<float>, ops::GridSampleGradOpCUDAKernel<double>);
60610bfa9ef47bbcc8d4964dcc5df8e5c6459f1c.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/grid_sampler_op.h" #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { static __forceinline__ __device__ bool in_bounds(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } template <typename T> static __forceinline__ __device__ void atomic_add(T* data, int h, int w, int sH, int sW, int H, int W, T delta) { if (in_bounds(h, w, H, W)) { platform::CudaAtomicAdd(data + h * sH + w * sW, delta); } } template <typename T> static __forceinline__ __device__ T _unnormalize(T coord, int size, bool align_corners) { if (align_corners) { return ((coord + 1.f) / 2) * (size - 1); } else { return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T clip_indexes(T in, int max_value) { return min(static_cast<T>(max_value), max(in, static_cast<T>(0))); } template <typename T> static __forceinline__ __device__ T reflect_indexes(T in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<T>(0); } T min = static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = fabs(in - min); T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } template <typename T> static __forceinline__ __device__ T compute_positions(T coord, int size, PaddingMode padding_mode, bool align_corners) { coord = _unnormalize<T>(coord, size, align_corners); if (padding_mode == PaddingMode::border) { coord = clip_indexes(coord, size - 1); } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = reflect_indexes(coord, 0, 2 * (size - 1)); } else { coord = reflect_indexes(coord, -1, 2 * size - 1); } coord = clip_indexes(coord, size - 1); } return coord; } template <typename T> static __forceinline__ __device__ T _unnormalize_with_mask(T coord, int size, bool align_corners, T* grad_in) { if (align_corners) { *grad_in = static_cast<T>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { *grad_in = static_cast<T>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T clip_indexes_with_mask(T in, int clip_limit, T* grad_in) { if (in <= static_cast<T>(0)) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } else { T max = static_cast<T>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<T>(0); return max; } else { *grad_in = static_cast<T>(1); return in; } } } template <typename T> static __forceinline__ __device__ T reflect_indexes_with_mask(T in, int twice_low, int twice_high, T* grad_in) { if (twice_low == twice_high) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } int grad_in_mult_; T min = 
static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = in - min; if (in < static_cast<T>(0)) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { *grad_in = static_cast<T>(grad_in_mult_); return extra + min; } else { *grad_in = static_cast<T>(-grad_in_mult_); return span - extra + min; } } template <typename T> static __forceinline__ __device__ T compute_positions_with_mask(T coord, int size, PaddingMode padding_mode, bool align_corners, T* grad_in) { T grad_clip, grad_refl; coord = _unnormalize_with_mask<T>(coord, size, align_corners, grad_in); if (padding_mode == PaddingMode::border) { coord = clip_indexes_with_mask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = reflect_indexes_with_mask(coord, 0, 2 * (size - 1), &grad_refl); } else { coord = reflect_indexes_with_mask(coord, -1, 2 * size - 1, &grad_refl); } coord = clip_indexes_with_mask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_refl * grad_clip; } return coord; } template <typename T> __global__ void grid_sample_cuda_kernel(const int nthreads, int n, int out_c, int out_h, int out_w, int in_h, int in_w, const T* input, const T* grid, T* output, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sN = out_c * in_h * in_w; int inp_sC = in_h * in_w; int inp_sH = in_w; int inp_sW = 1; int grid_sN = out_h * out_w * 2; int grid_sH = out_w * 2; int grid_sW = 2; int grid_sCoor = 1; int out_sN = out_c * out_h * out_w; int out_sC = out_h * out_w; int out_sH = out_w; int out_sW = 1; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int n = index / (out_h * out_w); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; ix = compute_positions(ix, in_w, padding_mode, align_corners); iy = compute_positions(iy, in_h, padding_mode, align_corners); if (mode == Mode::bilinear) { int ix_nw = static_cast<int>(floor(ix)); int iy_nw = static_cast<int>(floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; T nw = (ix_se - ix) * (iy_se - iy); T ne = (ix - ix_sw) * (iy_sw - iy); T sw = (ix_ne - ix) * (iy - iy_ne); T se = (ix - ix_nw) * (iy - iy_nw); auto inp_offset_NC = n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < out_c; ++c, inp_offset_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<T>(0); if (in_bounds(iy_nw, ix_nw, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (in_bounds(iy_ne, ix_ne, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (in_bounds(iy_sw, ix_sw, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (in_bounds(iy_se, ix_se, in_h, in_w)) { *out_ptr_NCHW += input[inp_offset_NC + iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (mode == Mode::nearest) { int ix_nearest = static_cast<int>(std::nearbyint(ix)); int iy_nearest = static_cast<int>(std::nearbyint(iy)); auto inp_offset_NC = n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < out_c; ++c, inp_offset_NC += inp_sC, out_ptr_NCHW += out_sC) { if 
(in_bounds(iy_nearest, ix_nearest, in_h, in_w)) { *out_ptr_NCHW = input[inp_offset_NC + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<T>(0); } } } } } template <typename T> class GridSampleOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.cuda_device_context(); auto align_corners = ctx.Attr<bool>("align_corners"); auto padding_mode_s = ctx.Attr<std::string>("padding_mode"); auto mode_s = ctx.Attr<std::string>("mode"); PaddingMode padding_mode; Mode mode; if (padding_mode_s == "border") { padding_mode = PaddingMode::border; } else if (padding_mode_s == "reflection") { padding_mode = PaddingMode::reflect; } else { padding_mode = PaddingMode::zeros; } if (mode_s == "nearest") { mode = Mode::nearest; } else { mode = Mode::bilinear; } auto* input = ctx.Input<Tensor>("X"); auto* grid = ctx.Input<Tensor>("Grid"); const int n = grid->dims()[0]; const int out_h = grid->dims()[1]; const int out_w = grid->dims()[2]; const int c = input->dims()[1]; const int in_h = input->dims()[2]; const int in_w = input->dims()[3]; VLOG(3) << "n: " << n << "; c: " << c << "; out_h: " << out_h << "; out_w: " << out_w; auto* output = ctx.Output<Tensor>("Output"); auto* output_data = output->mutable_data<T>(ctx.GetPlace()); VLOG(3) << "out dims: " << output->dims()[0] << "; " << output->dims()[1] << "; " << output->dims()[2] << "; " << output->dims()[3]; math::SetConstant<paddle::platform::CUDADeviceContext, T>()( dev_ctx, output, static_cast<T>(0)); int count = static_cast<int>(n * out_h * out_w); auto cu_stream = dev_ctx.stream(); int block_size = 512; int grid_size = (count + block_size - 1) / block_size; VLOG(3) << "cuda launch - grid dims: " << grid_size << "; block dims" << block_size; grid_sample_cuda_kernel<T><<<grid_size, block_size, 0, cu_stream>>>( count, n, c, out_h, out_w, in_h, in_w, input->data<T>(), grid->data<T>(), output_data, mode, padding_mode, align_corners); } }; template <typename T> __global__ void grid_sampler_cuda_backward_kernel( const int nthreads, const T* grad_output, const T* input, const T* grid, int n, int out_c, int out_h, int out_w, int in_h, int in_w, T* grad_input, T* grad_grid, const Mode mode, const PaddingMode padding_mode, bool align_corners) { int inp_sN = out_c * in_h * in_w; int inp_sC = in_h * in_w; int inp_sH = in_w; int inp_sW = 1; int grid_sN = out_h * out_w * 2; int grid_sH = out_w * 2; int grid_sW = 2; int grid_sCoor = 1; int gOut_sN = out_c * out_h * out_w; int gOut_sC = out_h * out_w; int gOut_sH = out_w; int gOut_sW = 1; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_w; const int h = (index / out_w) % out_h; const int n = index / (out_h * out_w); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T gix_mult, giy_mult; ix = compute_positions_with_mask(ix, in_w, padding_mode, align_corners, &gix_mult); iy = compute_positions_with_mask(iy, in_h, padding_mode, align_corners, &giy_mult); if (mode == Mode::bilinear) { int ix_nw = static_cast<int>(floor(ix)); int iy_nw = static_cast<int>(floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; T nw = (ix_se - ix) * (iy_se - iy); T ne = (ix - ix_sw) * (iy_sw - iy); T sw = (ix_ne - ix) * (iy - iy_ne); T se = (ix - ix_nw) * (iy - iy_nw); T gix = static_cast<T>(0), giy = static_cast<T>(0); int gOut_offset = n * gOut_sN + h * 
gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; int inp_offset_NC = n * inp_sN; for (int c = 0; c < out_c; ++c, inp_offset_NC += inp_sC, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { T gOut = grad_output[gOut_offset]; atomic_add(gInp_ptr_NC, iy_nw, ix_nw, inp_sH, inp_sW, in_h, in_w, nw * gOut); atomic_add(gInp_ptr_NC, iy_ne, ix_ne, inp_sH, inp_sW, in_h, in_w, ne * gOut); atomic_add(gInp_ptr_NC, iy_sw, ix_sw, inp_sH, inp_sW, in_h, in_w, sw * gOut); atomic_add(gInp_ptr_NC, iy_se, ix_se, inp_sH, inp_sW, in_h, in_w, se * gOut); if (in_bounds(iy_nw, ix_nw, in_h, in_w)) { T nw_val = input[inp_offset_NC + iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (in_bounds(iy_ne, ix_ne, in_h, in_w)) { T ne_val = input[inp_offset_NC + iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (in_bounds(iy_sw, ix_sw, in_h, in_w)) { T sw_val = input[inp_offset_NC + iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (in_bounds(iy_se, ix_se, in_h, in_w)) { T se_val = input[inp_offset_NC + iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } if (grad_grid != nullptr) { T* gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } else if (mode == Mode::nearest) { int ix_nearest = static_cast<int>(std::nearbyint(ix)); int iy_nearest = static_cast<int>(std::nearbyint(iy)); int gOut_offset = n * gOut_sN + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; for (int c = 0; c < out_c; ++c, gInp_ptr_NC += inp_sC, gOut_offset += gOut_sC) { atomic_add(gInp_ptr_NC, iy_nearest, ix_nearest, inp_sH, inp_sW, in_h, in_w, grad_output[gOut_offset]); } if (grad_grid != nullptr) { T* gGrid_ptr_NHW = grad_grid + index * grid_sW; gGrid_ptr_NHW[0] = static_cast<T>(0); gGrid_ptr_NHW[1] = static_cast<T>(0); } } } } template <typename T> class GridSampleGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto& dev_ctx = ctx.cuda_device_context(); auto align_corners = ctx.Attr<bool>("align_corners"); auto padding_mode_s = ctx.Attr<std::string>("padding_mode"); auto mode_s = ctx.Attr<std::string>("mode"); PaddingMode padding_mode; Mode mode; if (padding_mode_s == "border") { padding_mode = PaddingMode::border; } else if (padding_mode_s == "reflection") { padding_mode = PaddingMode::reflect; } else { padding_mode = PaddingMode::zeros; } if (mode_s == "nearest") { mode = Mode::nearest; } else { mode = Mode::bilinear; } auto* input = ctx.Input<Tensor>("X"); auto* grid = ctx.Input<Tensor>("Grid"); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); const int n = grid->dims()[0]; const int out_h = grid->dims()[1]; const int out_w = grid->dims()[2]; const int c = input->dims()[1]; const int in_h = input->dims()[2]; const int in_w = input->dims()[3]; auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); input_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.template device_context<paddle::platform::CUDADeviceContext>(), input_grad, static_cast<T>(0)); T* grid_grad_data = nullptr; if (ctx.HasOutput(framework::GradVarName("Grid"))) { auto* grid_grad = ctx.Output<Tensor>(framework::GradVarName("Grid")); grid_grad_data = 
grid_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.template device_context<paddle::platform::CUDADeviceContext>(), grid_grad, static_cast<T>(0)); } int count = static_cast<int>(n * out_h * out_w); auto cu_stream = dev_ctx.stream(); int block_size = 512; int grid_size = (count + block_size - 1) / block_size; VLOG(3) << "cuda launch grad kernel - grid dims: " << grid_size << "; block dims" << block_size << "; count: " << count; grid_sampler_cuda_backward_kernel< T><<<grid_size, block_size, 0, cu_stream>>>( count, output_grad->data<T>(), input->data<T>(), grid->data<T>(), n, c, out_h, out_w, in_h, in_w, input_grad->data<T>(), grid_grad_data, mode, padding_mode, align_corners); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(grid_sampler, ops::GridSampleOpCUDAKernel<float>, ops::GridSampleOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(grid_sampler_grad, ops::GridSampleGradOpCUDAKernel<float>, ops::GridSampleGradOpCUDAKernel<double>);
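/*
 * Illustrative sketch (not part of the operator files above): a plain CPU reference for the
 * bilinear / zeros-padding sampling these kernels implement, handy for spot-checking the CUDA
 * output on tiny tensors. The corner weights mirror the nw/ne/sw/se terms in the backward
 * kernel above; the grid-to-pixel unnormalization below is an assumption (the usual
 * align_corners convention), since compute_positions() is defined earlier in the file.
 */
#include <cstdio>
#include <cmath>
#include <vector>

static float unnormalize(float coord, int size, bool align_corners) {
  // assumed mapping of grid coordinates in [-1, 1] to pixel indices
  return align_corners ? (coord + 1.f) * 0.5f * (size - 1)
                       : ((coord + 1.f) * size - 1.f) * 0.5f;
}

static bool inside(int y, int x, int h, int w) { return y >= 0 && y < h && x >= 0 && x < w; }

// input: C x H x W, grid: outH x outW x 2 (x, y), output: C x outH x outW, zeros padding
void cpu_grid_sample_bilinear(const std::vector<float>& input, const std::vector<float>& grid,
                              std::vector<float>& out, int c, int in_h, int in_w,
                              int out_h, int out_w, bool align_corners) {
  for (int y = 0; y < out_h; ++y)
    for (int x = 0; x < out_w; ++x) {
      float ix = unnormalize(grid[(y * out_w + x) * 2 + 0], in_w, align_corners);
      float iy = unnormalize(grid[(y * out_w + x) * 2 + 1], in_h, align_corners);
      int x0 = (int)std::floor(ix), y0 = (int)std::floor(iy);
      float nw = (x0 + 1 - ix) * (y0 + 1 - iy);   // same corner weights as the kernels above
      float ne = (ix - x0) * (y0 + 1 - iy);
      float sw = (x0 + 1 - ix) * (iy - y0);
      float se = (ix - x0) * (iy - y0);
      for (int ch = 0; ch < c; ++ch) {
        const float* in = &input[ch * in_h * in_w];
        float v = 0.f;                            // out-of-range corners contribute zero
        if (inside(y0,     x0,     in_h, in_w)) v += nw * in[y0 * in_w + x0];
        if (inside(y0,     x0 + 1, in_h, in_w)) v += ne * in[y0 * in_w + x0 + 1];
        if (inside(y0 + 1, x0,     in_h, in_w)) v += sw * in[(y0 + 1) * in_w + x0];
        if (inside(y0 + 1, x0 + 1, in_h, in_w)) v += se * in[(y0 + 1) * in_w + x0 + 1];
        out[(ch * out_h + y) * out_w + x] = v;
      }
    }
}

int main() {
  // 1-channel 2x2 input sampled at the image center: expect the average of the four pixels.
  std::vector<float> input = {1.f, 2.f, 3.f, 4.f};
  std::vector<float> grid = {0.f, 0.f};           // one output location, grid coordinate (0, 0)
  std::vector<float> out(1);
  cpu_grid_sample_bilinear(input, grid, out, 1, 2, 2, 1, 1, true);
  std::printf("center sample = %f (expected 2.5)\n", out[0]);
  return 0;
}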
eef2d607d529726a0850b3f575654833c1e52088.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> __global__ void VecAdd(float* A, float* B, float* C, int N_op,int op_loop){ // N_op : no of total ops // op_loop: no of ops to do in a loop // Host code int j; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N_op){ for (j=0;j<op_loop;j++){ C[i*op_loop+j] = A[i*op_loop+j] + B[i*op_loop+j]; } } } int main() { int N_array[6]; int threadsPerBlock_op=256; int op_loop=4; int loop; int N_loop; int avg_loop; float time_spent; int clock_loop; avg_loop=100; //GPU timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); for (N_loop=0;N_loop<6;N_loop++){ // Size of vector array N_array[N_loop]=pow(2,15+N_loop); } for (N_loop=0;N_loop<6;N_loop++){ // size of array size_t size = N_array[N_loop]* sizeof(float); // Allocate input vectors h_A and h_B in host memory float* h_A = (float*)malloc(size); float* h_B = (float*)malloc(size); float* h_C = (float*)malloc(size); // Initialize input vectors // printf("Array A (first 10 values) \n "); for(loop = 0; loop < N_array[N_loop]; loop++){ h_A[loop] = rand() % 100 + 1; //if (loop<10){ // printf("%f ", h_A[loop]); // } } //printf("\nArray B (first 10 values) \n "); for(loop = 0; loop < N_array[N_loop]; loop++){ h_B[loop] = rand() % 100 + 1; //if (loop<10){ // printf("%f ", h_B[loop]); //} } // Allocate vectors in device memory float* d_A; hipMalloc(&d_A, size); float* d_B; hipMalloc(&d_B, size); float* d_C; hipMalloc(&d_C, size); // Copy vectors from host memory to device memory hipMemcpy(d_A, h_A, size,hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size,hipMemcpyHostToDevice); //vector size per loop //printf("Vector size per loop %d",N_array[N_loop]); // Invoke kernel int threadsPerBlock = threadsPerBlock_op; int N_op=(N_array[N_loop] + op_loop -1)/op_loop; int blocksPerGrid = (N_op + threadsPerBlock - 1) /threadsPerBlock; for(clock_loop=0;clock_loop<avg_loop;clock_loop++){ if (clock_loop==1){ hipEventRecord(start, 0); } hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A,d_B, d_C, N_op,op_loop); if (clock_loop==avg_loop-1){ hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time_spent, start, stop); } } // h_C contains the result in host memory hipMemcpy(h_C, d_C, size,hipMemcpyDeviceToHost); //printf("\nArray C (first 10 outputs)\n"); //for(loop = 0; loop < 10; loop++) // printf("%f ", h_C[loop]); // Free device memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // Free host memory free(h_A); free(h_B); time_spent=time_spent/(avg_loop-1)*10; printf("\n Average Time spent in loop %d is %f",N_loop,time_spent); } return 0; }
eef2d607d529726a0850b3f575654833c1e52088.cu
#include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> __global__ void VecAdd(float* A, float* B, float* C, int N_op,int op_loop){ // N_op : no of total ops // op_loop: no of ops to do in a loop // Host code int j; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N_op){ for (j=0;j<op_loop;j++){ C[i*op_loop+j] = A[i*op_loop+j] + B[i*op_loop+j]; } } } int main() { int N_array[6]; int threadsPerBlock_op=256; int op_loop=4; int loop; int N_loop; int avg_loop; float time_spent; int clock_loop; avg_loop=100; //GPU timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); for (N_loop=0;N_loop<6;N_loop++){ // Size of vector array N_array[N_loop]=pow(2,15+N_loop); } for (N_loop=0;N_loop<6;N_loop++){ // size of array size_t size = N_array[N_loop]* sizeof(float); // Allocate input vectors h_A and h_B in host memory float* h_A = (float*)malloc(size); float* h_B = (float*)malloc(size); float* h_C = (float*)malloc(size); // Initialize input vectors // printf("Array A (first 10 values) \n "); for(loop = 0; loop < N_array[N_loop]; loop++){ h_A[loop] = rand() % 100 + 1; //if (loop<10){ // printf("%f ", h_A[loop]); // } } //printf("\nArray B (first 10 values) \n "); for(loop = 0; loop < N_array[N_loop]; loop++){ h_B[loop] = rand() % 100 + 1; //if (loop<10){ // printf("%f ", h_B[loop]); //} } // Allocate vectors in device memory float* d_A; cudaMalloc(&d_A, size); float* d_B; cudaMalloc(&d_B, size); float* d_C; cudaMalloc(&d_C, size); // Copy vectors from host memory to device memory cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice); //vector size per loop //printf("Vector size per loop %d",N_array[N_loop]); // Invoke kernel int threadsPerBlock = threadsPerBlock_op; int N_op=(N_array[N_loop] + op_loop -1)/op_loop; int blocksPerGrid = (N_op + threadsPerBlock - 1) /threadsPerBlock; for(clock_loop=0;clock_loop<avg_loop;clock_loop++){ if (clock_loop==1){ cudaEventRecord(start, 0); } VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B, d_C, N_op,op_loop); if (clock_loop==avg_loop-1){ cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time_spent, start, stop); } } // h_C contains the result in host memory cudaMemcpy(h_C, d_C, size,cudaMemcpyDeviceToHost); //printf("\nArray C (first 10 outputs)\n"); //for(loop = 0; loop < 10; loop++) // printf("%f ", h_C[loop]); // Free device memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // Free host memory free(h_A); free(h_B); time_spent=time_spent/(avg_loop-1)*10; printf("\n Average Time spent in loop %d is %f",N_loop,time_spent); } return 0; }
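// Illustrative sketch (not part of the .hip/.cu pair above): a minimal, self-contained
// correctness check for the thread-coarsened VecAdd pattern used above, where each thread
// adds op_loop consecutive elements (index i*op_loop+j). It verifies the GPU result against
// a CPU sum and frees every allocation, including h_C. The array size and launch parameters
// here are illustrative choices, not the benchmark settings above.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

__global__ void VecAddCheck(const float* A, const float* B, float* C, int N_op, int op_loop) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N_op) {
        for (int j = 0; j < op_loop; j++) {
            C[i * op_loop + j] = A[i * op_loop + j] + B[i * op_loop + j];
        }
    }
}

int main() {
    const int N = 1 << 16;          // total elements (divisible by op_loop)
    const int op_loop = 4;          // elements handled per thread
    const size_t size = N * sizeof(float);

    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);
    for (int i = 0; i < N; i++) { h_A[i] = (float)(rand() % 100); h_B[i] = (float)(rand() % 100); }

    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, size); cudaMalloc(&d_B, size); cudaMalloc(&d_C, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    int N_op = (N + op_loop - 1) / op_loop;   // one thread per group of op_loop elements
    int threads = 256;
    int blocks = (N_op + threads - 1) / threads;
    VecAddCheck<<<blocks, threads>>>(d_A, d_B, d_C, N_op, op_loop);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    int errors = 0;
    for (int i = 0; i < N; i++) {
        if (h_C[i] != h_A[i] + h_B[i]) errors++;
    }
    printf("%s (%d mismatches)\n", errors == 0 ? "PASS" : "FAIL", errors);

    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(h_A); free(h_B); free(h_C);   // h_C is released here as well
    return 0;
}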
536d500d66455f69d6c38b06a41114778792b6ac.hip
// !!! This is a file automatically generated by hipify!!! #include <bits/stdc++.h> #include <stdint.h> #include "labeling.h" #define DEBUG #define DUMP_ALL namespace { uint32_t seed = 0; void p_srand(uint32_t x) { seed = x;} uint32_t p_rand() {return seed = (seed*9301 + 49297);} } int main() { hipSetDevice(1); static const int MAXN = 16777216<<1; static const int MAXTOKEN = 500; static char s[MAXN]; static int ret[MAXN]; static char *cuStr; static int32_t *cuPos; int cases = 0; int n, m1, m2, seed; while (scanf("%d %d %d %d", &n, &m1, &m2, &seed) == 4) { assert(n <= MAXN); assert(m1 <= MAXTOKEN); p_srand(seed); hipMalloc(&cuStr, sizeof(char)*n); hipMalloc(&cuPos, sizeof(int32_t)*n); // random string { int pos = 0; for (; pos < n;) { int sp = pos == 0 ? p_rand()%m2 : p_rand()%m2+1; int cp = p_rand()%m1+1; for (; sp && pos < n; sp--, pos++) s[pos] = ' '; for (; cp && pos < n; cp--, pos++) s[pos] = 'a' + p_rand()%26; } #ifdef DEBUG for (int i = 0; i < n; i++) putchar(s[i]); puts(""); #endif hipMemcpy(cuStr, s, sizeof(char)*n, hipMemcpyHostToDevice); } // test performance { clock_t st, ed; st = clock(); const int ROUND = 512; for (int i = 0; i < ROUND; i++) { labeling(cuStr, cuPos, n); } hipMemcpy(ret, cuPos, sizeof(int32_t)*n, hipMemcpyDeviceToHost); ed = clock() - st; fprintf(stderr, "It took average %lf seconds.\n", ((float) ed)/CLOCKS_PER_SEC/ROUND); } // check { clock_t st, ed; st = clock(); uint32_t HEX = 0; for (int i = 0, sum = 0; i < n; i++) { #ifdef DEBUG // printf("%d%c", ret[i], " \n"[i==n-1]); printf("%d", ret[i]); if (i == n-1) puts(""); #endif if (s[i] > ' ') sum++; else sum = 0; #ifndef DUMP_ALL assert(sum == ret[i]); #endif HEX ^= i*ret[i]; } ed = clock() - st; fprintf(stderr, "Check task took %lf seconds.\n", ((float) ed)/CLOCKS_PER_SEC); printf("Case #%d: PASS %X\n", ++cases, HEX); } hipFree(cuPos); hipFree(cuStr); } return 0; }
536d500d66455f69d6c38b06a41114778792b6ac.cu
#include <bits/stdc++.h> #include <stdint.h> #include "labeling.h" #define DEBUG #define DUMP_ALL namespace { uint32_t seed = 0; void p_srand(uint32_t x) { seed = x;} uint32_t p_rand() {return seed = (seed*9301 + 49297);} } int main() { cudaSetDevice(1); static const int MAXN = 16777216<<1; static const int MAXTOKEN = 500; static char s[MAXN]; static int ret[MAXN]; static char *cuStr; static int32_t *cuPos; int cases = 0; int n, m1, m2, seed; while (scanf("%d %d %d %d", &n, &m1, &m2, &seed) == 4) { assert(n <= MAXN); assert(m1 <= MAXTOKEN); p_srand(seed); cudaMalloc(&cuStr, sizeof(char)*n); cudaMalloc(&cuPos, sizeof(int32_t)*n); // random string { int pos = 0; for (; pos < n;) { int sp = pos == 0 ? p_rand()%m2 : p_rand()%m2+1; int cp = p_rand()%m1+1; for (; sp && pos < n; sp--, pos++) s[pos] = ' '; for (; cp && pos < n; cp--, pos++) s[pos] = 'a' + p_rand()%26; } #ifdef DEBUG for (int i = 0; i < n; i++) putchar(s[i]); puts(""); #endif cudaMemcpy(cuStr, s, sizeof(char)*n, cudaMemcpyHostToDevice); } // test performance { clock_t st, ed; st = clock(); const int ROUND = 512; for (int i = 0; i < ROUND; i++) { labeling(cuStr, cuPos, n); } cudaMemcpy(ret, cuPos, sizeof(int32_t)*n, cudaMemcpyDeviceToHost); ed = clock() - st; fprintf(stderr, "It took average %lf seconds.\n", ((float) ed)/CLOCKS_PER_SEC/ROUND); } // check { clock_t st, ed; st = clock(); uint32_t HEX = 0; for (int i = 0, sum = 0; i < n; i++) { #ifdef DEBUG // printf("%d%c", ret[i], " \n"[i==n-1]); printf("%d", ret[i]); if (i == n-1) puts(""); #endif if (s[i] > ' ') sum++; else sum = 0; #ifndef DUMP_ALL assert(sum == ret[i]); #endif HEX ^= i*ret[i]; } ed = clock() - st; fprintf(stderr, "Check task took %lf seconds.\n", ((float) ed)/CLOCKS_PER_SEC); printf("Case #%d: PASS %X\n", ++cases, HEX); } cudaFree(cuPos); cudaFree(cuStr); } return 0; }
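// Illustrative sketch (not part of the pair above): labeling.h and its implementation are not
// included in this file, so this is one possible naive version consistent with the checker in
// the test driver (label = length of the current run of non-space characters ending at each
// position, 0 on spaces). The prototype void labeling(char*, int32_t*, int) is assumed from
// the call site; a real solution would use a segmented prefix scan rather than this
// O(run length) backward walk per thread.
#include <stdint.h>
#include <cuda_runtime.h>

__global__ void naiveLabelKernel(const char* str, int32_t* pos, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    if (str[i] == ' ') {                    // the generator only emits ' ' and 'a'..'z'
        pos[i] = 0;
        return;
    }
    int32_t count = 0;
    int j = i;
    while (j >= 0 && str[j] != ' ') {       // walk back to the previous space (or the start)
        count++;
        j--;
    }
    pos[i] = count;
}

void labeling(char* cuStr, int32_t* cuPos, int strLen) {
    const int threads = 256;
    const int blocks = (strLen + threads - 1) / threads;
    naiveLabelKernel<<<blocks, threads>>>(cuStr, cuPos, strLen);
}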
719b53d7c1f7ac2cfe17dcb42b0eccb5eae92250.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> // for rand(), malloc(), free() #include <io.h> // for open(), write(), close() in WIN32 #include <fcntl.h> // for open(), write() #include <sys/stat.h> #include <windows.h> // for high-resolution performance counter #if defined(NDEBUG) #define CUDA_CHECK(x) (x) #else #define CUDA_CHECK(x) do {\ (x); \ hipError_t e = hipGetLastError(); \ if (hipSuccess != e) { \ printf("cuda failure \"%s\" at %s:%d\n", \ hipGetErrorString(e), \ __FILE__, __LINE__); \ exit(1); \ } \ } while (0) #endif #define GRIDSIZE (8 * 1024) #define BLOCKSIZE 1024 #define TOTALSIZE (GRIDSIZE * BLOCKSIZE) void genData(float* ptr, unsigned int size) { while (size--) { *ptr++ = (float)(rand() % 1000) / 1000.0F; } } void getDiff(float* dst, const float* src, unsigned int size) { for (register int i = 1; i < size; ++i) { dst[i] = src[i] - src[i - 1]; } } void writeData(char* filename, const float* src, unsigned int size) { int fd = open(filename, O_WRONLY | O_BINARY | O_CREAT, S_IREAD | S_IWRITE); write(fd, src, size * sizeof(float)); close(fd); printf("data written to \"%s\"\n", filename); } __global__ void adj_diff_shared(float* result, float* input) { __shared__ float s_data[BLOCKSIZE]; register unsigned int tx = threadIdx.x; register unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; register float answer; s_data[tx] = input[i]; __syncthreads(); if (tx > 0) { answer = s_data[tx] - s_data[tx - 1]; } else if (i > 0) { answer = s_data[tx] - input[i - 1]; } result[i] = answer; } int main(void) { float* pSource = NULL; float* pResult = NULL; int i; long long cntStart, cntEnd, freq; QueryPerformanceFrequency((LARGE_INTEGER*)(&freq)); // malloc memories on the host-side pSource = (float*)malloc(TOTALSIZE * sizeof(float)); pResult = (float*)malloc(TOTALSIZE * sizeof(float)); // generate source data genData(pSource, TOTALSIZE); // CUDA: allocate device memory float* pSourceDev = NULL; float* pResultDev = NULL; CUDA_CHECK( hipMalloc((void**)&pSourceDev, TOTALSIZE * sizeof(float)) ); CUDA_CHECK( hipMalloc((void**)&pResultDev, TOTALSIZE * sizeof(float)) ); // CUDA: copy from host to device CUDA_CHECK( hipMemcpy(pSourceDev, pSource, TOTALSIZE * sizeof(float), hipMemcpyHostToDevice) ); // start timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntStart)); // start the stop watch // CUDA: launch the kernel: result[i] = input[i] - input[i-1] dim3 dimGrid(GRIDSIZE, 1, 1); dim3 dimBlock(BLOCKSIZE, 1, 1); hipLaunchKernelGGL(( adj_diff_shared) , dim3(dimGrid), dim3(dimBlock), 0, 0, pResultDev, pSourceDev); // end timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntEnd)); // end the stop watch printf("elapsed time = %f usec\n", (double)(cntEnd - cntStart) * 1000000.0 / (double)(freq)); // CUDA: copy from device to host CUDA_CHECK( hipMemcpy(pResult, pResultDev, TOTALSIZE * sizeof(float), hipMemcpyDeviceToHost) ); // write the result on the disk // writeData("host.out", pResult, TOTALSIZE); // print sample cases i = 0; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE - 1; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE / 2; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); // CUDA: free the memory CUDA_CHECK( hipFree(pSourceDev) ); CUDA_CHECK( hipFree(pResultDev) ); // free the memory free(pSource); free(pResult); }
719b53d7c1f7ac2cfe17dcb42b0eccb5eae92250.cu
#include <stdio.h> #include <stdlib.h> // for rand(), malloc(), free() #include <io.h> // for open(), write(), close() in WIN32 #include <fcntl.h> // for open(), write() #include <sys/stat.h> #include <windows.h> // for high-resolution performance counter #if defined(NDEBUG) #define CUDA_CHECK(x) (x) #else #define CUDA_CHECK(x) do {\ (x); \ cudaError_t e = cudaGetLastError(); \ if (cudaSuccess != e) { \ printf("cuda failure \"%s\" at %s:%d\n", \ cudaGetErrorString(e), \ __FILE__, __LINE__); \ exit(1); \ } \ } while (0) #endif #define GRIDSIZE (8 * 1024) #define BLOCKSIZE 1024 #define TOTALSIZE (GRIDSIZE * BLOCKSIZE) void genData(float* ptr, unsigned int size) { while (size--) { *ptr++ = (float)(rand() % 1000) / 1000.0F; } } void getDiff(float* dst, const float* src, unsigned int size) { for (register int i = 1; i < size; ++i) { dst[i] = src[i] - src[i - 1]; } } void writeData(char* filename, const float* src, unsigned int size) { int fd = open(filename, O_WRONLY | O_BINARY | O_CREAT, S_IREAD | S_IWRITE); write(fd, src, size * sizeof(float)); close(fd); printf("data written to \"%s\"\n", filename); } __global__ void adj_diff_shared(float* result, float* input) { __shared__ float s_data[BLOCKSIZE]; register unsigned int tx = threadIdx.x; register unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; register float answer; s_data[tx] = input[i]; __syncthreads(); if (tx > 0) { answer = s_data[tx] - s_data[tx - 1]; } else if (i > 0) { answer = s_data[tx] - input[i - 1]; } result[i] = answer; } int main(void) { float* pSource = NULL; float* pResult = NULL; int i; long long cntStart, cntEnd, freq; QueryPerformanceFrequency((LARGE_INTEGER*)(&freq)); // malloc memories on the host-side pSource = (float*)malloc(TOTALSIZE * sizeof(float)); pResult = (float*)malloc(TOTALSIZE * sizeof(float)); // generate source data genData(pSource, TOTALSIZE); // CUDA: allocate device memory float* pSourceDev = NULL; float* pResultDev = NULL; CUDA_CHECK( cudaMalloc((void**)&pSourceDev, TOTALSIZE * sizeof(float)) ); CUDA_CHECK( cudaMalloc((void**)&pResultDev, TOTALSIZE * sizeof(float)) ); // CUDA: copy from host to device CUDA_CHECK( cudaMemcpy(pSourceDev, pSource, TOTALSIZE * sizeof(float), cudaMemcpyHostToDevice) ); // start timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntStart)); // start the stop watch // CUDA: launch the kernel: result[i] = input[i] - input[i-1] dim3 dimGrid(GRIDSIZE, 1, 1); dim3 dimBlock(BLOCKSIZE, 1, 1); adj_diff_shared <<< dimGrid, dimBlock>>>(pResultDev, pSourceDev); // end timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntEnd)); // end the stop watch printf("elapsed time = %f usec\n", (double)(cntEnd - cntStart) * 1000000.0 / (double)(freq)); // CUDA: copy from device to host CUDA_CHECK( cudaMemcpy(pResult, pResultDev, TOTALSIZE * sizeof(float), cudaMemcpyDeviceToHost) ); // write the result on the disk // writeData("host.out", pResult, TOTALSIZE); // print sample cases i = 0; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE - 1; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE / 2; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); // CUDA: free the memory CUDA_CHECK( cudaFree(pSourceDev) ); CUDA_CHECK( cudaFree(pResultDev) ); // free the memory free(pSource); free(pResult); }
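// Illustrative sketch (not part of the pair above): the file defines a CPU reference getDiff()
// but never calls it, so this standalone, portable check compares the shared-memory kernel
// against the same recurrence on the host. Sizes here are small illustrative values, not the
// GRIDSIZE/BLOCKSIZE benchmark settings above. Index 0 is skipped in the comparison: in the
// original kernel the very first element (tx == 0 and i == 0) stores an unassigned 'answer',
// so result[0] is indeterminate there; this copy initializes it to 0 instead.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHECK_BLOCK 256

__global__ void adj_diff_shared_check(float* result, const float* input) {
    __shared__ float s_data[CHECK_BLOCK];
    unsigned int tx = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    s_data[tx] = input[i];
    __syncthreads();
    float answer = 0.0f;                                      // defined value for i == 0
    if (tx > 0)      answer = s_data[tx] - s_data[tx - 1];    // both operands come from shared memory
    else if (i > 0)  answer = s_data[tx] - input[i - 1];      // first thread of a block reads its left neighbor from global memory
    result[i] = answer;
}

int main(void) {
    const int n = CHECK_BLOCK * 64;            // must be a multiple of the block size
    const size_t bytes = n * sizeof(float);
    float* src = (float*)malloc(bytes);
    float* dst = (float*)malloc(bytes);
    for (int i = 0; i < n; ++i) src[i] = (float)(rand() % 1000) / 1000.0f;

    float *d_src, *d_dst;
    cudaMalloc(&d_src, bytes);
    cudaMalloc(&d_dst, bytes);
    cudaMemcpy(d_src, src, bytes, cudaMemcpyHostToDevice);
    adj_diff_shared_check<<<n / CHECK_BLOCK, CHECK_BLOCK>>>(d_dst, d_src);
    cudaMemcpy(dst, d_dst, bytes, cudaMemcpyDeviceToHost);

    int errors = 0;
    for (int i = 1; i < n; ++i) {              // same recurrence as getDiff() above
        if (dst[i] != src[i] - src[i - 1]) ++errors;
    }
    printf("adjacent difference: %s (%d mismatches)\n", errors ? "FAIL" : "PASS", errors);

    cudaFree(d_src); cudaFree(d_dst);
    free(src); free(dst);
    return 0;
}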
ef279ad834311f9584ea55d06837886ab02245a8.hip
// !!! This is a file automatically generated by hipify!!!
/*
* ARQUITECTURA DE COMPUTADORES
* 2º Grado en Ingenieria Informatica
* Curso 2020/21
*
* ENTREGA no.3 Gráficos en CUDA.
*
* EQUIPO: G6 ARCO 103
* MIEMBROS: Gonzalez Martinez Sergio
*           Arnaiz Lopez Lucia
*           San Martin Liendo Alvar
*
*/
///////////////////////////////////////////////////////////////////////////
// includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "gpu_bitmap.h"
// defines
#define TCASILLA 4
#define ANCHO 128* TCASILLA // Dimension horizontal
#define ALTO 128*TCASILLA // Dimension vertical
// GLOBAL: funcion llamada desde el host y ejecutada en el device (kernel)
__global__ void kernel(unsigned char* imagen) {
    // ** Kernel bidimensional multibloque **
    //
    // coordenada horizontal de cada hilo
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    // coordenada vertical de cada hilo
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // indice global de cada hilo (indice lineal para acceder a la memoria)
    int myID = x + y * blockDim.x * gridDim.x;
    // cada hilo obtiene la posicion de su pixel
    int miPixel = myID * 4;
    int idBloque = blockIdx.x / TCASILLA + blockIdx.y / TCASILLA;
    if (idBloque % 2 == 0) {
        imagen[miPixel + 0] = 0; // canal R
        imagen[miPixel + 1] = 0; // canal G
        imagen[miPixel + 2] = 0; // canal B
        imagen[miPixel + 3] = 0; // canal alf
        // cada hilo rellena los 4 canales de su pixel con un valor
    }
    else {
        imagen[miPixel + 0] = 255; // canal R
        imagen[miPixel + 1] = 255; // canal G
        imagen[miPixel + 2] = 255; // canal B
        imagen[miPixel + 3] = 255; // canal alf
    }
}
// MAIN: rutina principal ejecutada en el host
int main(int argc, char** argv) {
    // Declaracion del bitmap:
    // Inicializacion de la estructura RenderGPU
    RenderGPU foto(ANCHO, ALTO);
    // Tamaño del bitmap en bytes
    size_t size = foto.image_size();
    // Asignacion y reserva de la memoria en el host (framebuffer)
    unsigned char* host_bitmap = foto.get_ptr();
    // Reserva en el device
    unsigned char* dev_bitmap;
    // declaracion de eventos
    hipEvent_t start;
    hipEvent_t stop;
    // creacion de eventos
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipMalloc((void**)&dev_bitmap, size);
    // Lanzamos un kernel bidimensional con bloques de 256 hilos (16x16)
    dim3 hilosB(16, 16);
    // Calculamos el numero de bloques necesario (un hilo por cada pixel)
    dim3 Nbloques(ANCHO / 16, ALTO / 16);
    // marca de inicio
    hipEventRecord(start, 0);
    // Generamos el bitmap
    kernel << <Nbloques, hilosB >> > (dev_bitmap);
    // Copiamos los datos desde la GPU hasta el framebuffer para visualizarlos
    hipMemcpy(host_bitmap, dev_bitmap, size, hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    // sincronizacion GPU-CPU
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    // impresion de resultados
    printf("> Tiempo de ejecucion: %f ms\n", elapsedTime);
    // Visualizacion y salida
    printf("\n...pulsa [ESC] para finalizar...");
    foto.display_and_exit();
    return 0;
}
ef279ad834311f9584ea55d06837886ab02245a8.cu
/* * ARQUITECTURA DE COMPUTADORES * 2º Grado en Ingenieria Informatica * Curso 2020/21 * * ENTREGA no.3 Gráficos en CUDA. * * EQUIPO: G6 ARCO 103 * MIEMBROS: Gonzalez Martinez Sergio * Arnaiz Lopez Lucia * San Martin Liendo Alvar * */ /////////////////////////////////////////////////////////////////////////// // includes #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "gpu_bitmap.h" // defines #define TCASILLA 4 #define ANCHO 128* TCASILLA // Dimension horizontal #define ALTO 128*TCASILLA // Dimension vertical // GLOBAL: funcion llamada desde el host y ejecutada en el device (kernel) __global__ void kernel(unsigned char* imagen) { // ** Kernel bidimensional multibloque ** // // coordenada horizontal de cada hilo int x = threadIdx.x + blockIdx.x * blockDim.x; // coordenada vertical de cada hilo int y = threadIdx.y + blockIdx.y * blockDim.y; // indice global de cada hilo (indice lineal para acceder a la memoria) int myID = x + y * blockDim.x * gridDim.x; // cada hilo obtiene la posicion de su pixel int miPixel = myID * 4; int idBloque = blockIdx.x / TCASILLA + blockIdx.y / TCASILLA; if (idBloque % 2 == 0) { imagen[miPixel + 0] = 0; // canal R imagen[miPixel + 1] = 0; // canal G imagen[miPixel + 2] = 0; // canal B imagen[miPixel + 3] = 0; // canal alf // cada hilo rellena los 4 canales de su pixel con un valor } else { imagen[miPixel + 0] = 255; // canal R imagen[miPixel + 1] = 255; // canal G imagen[miPixel + 2] = 255; // canal B imagen[miPixel + 3] = 255; // canal alf } } // MAIN: rutina principal ejecutada en el host int main(int argc, char** argv) { // Declaracion del bitmap: // Inicializacion de la estructura RenderGPU RenderGPU foto(ANCHO, ALTO); // Tamaño del bitmap en bytes size_t size = foto.image_size(); // Asignacion y reserva de la memoria en el host (framebuffer) unsigned char* host_bitmap = foto.get_ptr(); // Reserva en el device unsigned char* dev_bitmap; // declaracion de eventos cudaEvent_t start; cudaEvent_t stop; // creacion de eventos cudaEventCreate(&start); cudaEventCreate(&stop); cudaMalloc((void**)&dev_bitmap, size); // Lanzamos un kernel bidimensional con bloques de 256 hilos (16x16) dim3 hilosB(16, 16); // Calculamos el numero de bloques necesario (un hilo por cada pixel) dim3 Nbloques(ANCHO / 16, ALTO / 16); // marca de inicio cudaEventRecord(start, 0); // Generamos el bitmap kernel << <Nbloques, hilosB >> > (dev_bitmap); // Copiamos los datos desde la GPU hasta el framebuffer para visualizarlos cudaMemcpy(host_bitmap, dev_bitmap, size, cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); // sincronizacion GPU-CPU cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); // impresion de resultados printf("> Tiempo de ejecucion: %f ms\n", elapsedTime); // Visualizacion y salida printf("\n...pulsa [ESC] para finalizar..."); foto.display_and_exit(); return 0; }
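// Illustrative sketch (not part of the pair above): a host-only preview of the pattern the
// kernel produces. With 16x16 thread blocks and TCASILLA = 4, each checker cell spans
// 16 * TCASILLA = 64 pixels per side, and a pixel is white exactly when
// (blockIdx.x / TCASILLA + blockIdx.y / TCASILLA) is odd. Printing one character per block
// gives a quick way to eyeball that mapping without gpu_bitmap.h.
#include <stdio.h>

#define TCASILLA 4
#define ANCHO (128 * TCASILLA)
#define ALTO  (128 * TCASILLA)

int main(void) {
    const int blocksX = ANCHO / 16;   // grid dimensions used by the kernel launch above
    const int blocksY = ALTO / 16;
    for (int by = 0; by < blocksY; ++by) {
        for (int bx = 0; bx < blocksX; ++bx) {
            int idBloque = bx / TCASILLA + by / TCASILLA;   // same cell index as the kernel
            putchar(idBloque % 2 == 0 ? '.' : '#');         // '.' = black cell, '#' = white cell
        }
        putchar('\n');
    }
    return 0;
}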
abe709f71d487e2240590c4c2a3bb6f84cd84d85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #define CUDA_NUM_THREADS 512 #define THREADS_PER_BLOCK 64 #define DIM0(TENSOR) ((TENSOR).x) #define DIM1(TENSOR) ((TENSOR).y) #define DIM2(TENSOR) ((TENSOR).z) #define DIM3(TENSOR) ((TENSOR).w) #define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))]) template <typename scalar_t> __global__ void kernel_resample2d_update_output(const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t val = 0.0f; int dim_b = DIM0(output_size); int dim_c = DIM1(output_size); int dim_h = DIM2(output_size); int dim_w = DIM3(output_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - floor(xf); // alpha scalar_t beta = yf - floor(yf); // beta int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx)); val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx)); val += static_cast<float>((1. 
- alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx)); val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx)); } } output[index] = val; } template <typename scalar_t> __global__ void kernel_resample2d_backward_input1( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int dim_b = DIM0(gradOutput_size); int dim_c = DIM1(gradOutput_size); int dim_h = DIM2(gradOutput_size); int dim_w = DIM3(gradOutput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - int(xf); // alpha scalar_t beta = yf - int(yf); // beta int idim_h = DIM2(input1_size); int idim_w = DIM3(input1_size); int xL = max(min( int (floor(xf)), idim_w-1), 0); int xR = max(min( int (floor(xf)+1), idim_w -1), 0); int yT = max(min( int (floor(yf)), idim_h-1), 0); int yB = max(min( int (floor(yf)+1), idim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); } } } template <typename scalar_t> __global__ void kernel_resample2d_backward_input2( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t output = 0.0; int kernel_rad = (kernel_size - 1)/2; int dim_b = DIM0(gradInput_size); int dim_c = DIM1(gradInput_size); int dim_h = DIM2(gradInput_size); int dim_w = DIM3(gradInput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; int odim_c = DIM1(gradOutput_size); scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 
0); if (c % 2) { float gamma = 1 - (xf - floor(xf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); } } } } else { float gamma = 1 - (yf - floor(yf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); } } } } gradInput[index] = output; } void resample2d_kernel_forward( at::Tensor& input1, at::Tensor& input2, at::Tensor& output, int kernel_size) { int n = output.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3)); const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)); // TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] { //kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>( hipLaunchKernelGGL(( kernel_resample2d_update_output<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() , n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, output.data<float>(), output_size, output_stride, kernel_size); // })); // TODO: ATen-equivalent check // THCudaCheck(hipGetLastError()); } void resample2d_kernel_backward( at::Tensor& input1, at::Tensor& input2, at::Tensor& gradOutput, at::Tensor& gradInput1, at::Tensor& gradInput2, int kernel_size) { int n = gradOutput.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3)); const long4 
gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3)); const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3)); const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3)); // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] { //kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>( hipLaunchKernelGGL(( kernel_resample2d_backward_input1<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() , n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput1.data<float>(), gradInput1_size, gradInput1_stride, kernel_size ); // })); const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3)); const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3)); n = gradInput2.numel(); // AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] { //kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>( hipLaunchKernelGGL(( kernel_resample2d_backward_input2<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() , n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput2.data<float>(), gradInput2_size, gradInput2_stride, kernel_size ); // })); // TODO: Use the ATen equivalent to get last error // THCudaCheck(hipGetLastError()); }
abe709f71d487e2240590c4c2a3bb6f84cd84d85.cu
#include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #define CUDA_NUM_THREADS 512 #define THREADS_PER_BLOCK 64 #define DIM0(TENSOR) ((TENSOR).x) #define DIM1(TENSOR) ((TENSOR).y) #define DIM2(TENSOR) ((TENSOR).z) #define DIM3(TENSOR) ((TENSOR).w) #define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))]) template <typename scalar_t> __global__ void kernel_resample2d_update_output(const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t val = 0.0f; int dim_b = DIM0(output_size); int dim_c = DIM1(output_size); int dim_h = DIM2(output_size); int dim_w = DIM3(output_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - floor(xf); // alpha scalar_t beta = yf - floor(yf); // beta int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx)); val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx)); val += static_cast<float>((1. 
- alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx)); val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx)); } } output[index] = val; } template <typename scalar_t> __global__ void kernel_resample2d_backward_input1( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int dim_b = DIM0(gradOutput_size); int dim_c = DIM1(gradOutput_size); int dim_h = DIM2(gradOutput_size); int dim_w = DIM3(gradOutput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - int(xf); // alpha scalar_t beta = yf - int(yf); // beta int idim_h = DIM2(input1_size); int idim_w = DIM3(input1_size); int xL = max(min( int (floor(xf)), idim_w-1), 0); int xR = max(min( int (floor(xf)+1), idim_w -1), 0); int yT = max(min( int (floor(yf)), idim_h-1), 0); int yB = max(min( int (floor(yf)+1), idim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); } } } template <typename scalar_t> __global__ void kernel_resample2d_backward_input2( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t output = 0.0; int kernel_rad = (kernel_size - 1)/2; int dim_b = DIM0(gradInput_size); int dim_c = DIM1(gradInput_size); int dim_h = DIM2(gradInput_size); int dim_w = DIM3(gradInput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; int odim_c = DIM1(gradOutput_size); scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 
0); if (c % 2) { float gamma = 1 - (xf - floor(xf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); } } } } else { float gamma = 1 - (yf - floor(yf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); } } } } gradInput[index] = output; } void resample2d_kernel_forward( at::Tensor& input1, at::Tensor& input2, at::Tensor& output, int kernel_size) { int n = output.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3)); const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)); // TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] { //kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>( kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, output.data<float>(), output_size, output_stride, kernel_size); // })); // TODO: ATen-equivalent check // THCudaCheck(cudaGetLastError()); } void resample2d_kernel_backward( at::Tensor& input1, at::Tensor& input2, at::Tensor& gradOutput, at::Tensor& gradInput1, at::Tensor& gradInput2, int kernel_size) { int n = gradOutput.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3)); const long4 gradOutput_stride = make_long4(gradOutput.stride(0), 
gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3)); const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3)); const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3)); // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] { //kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>( kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput1.data<float>(), gradInput1_size, gradInput1_stride, kernel_size ); // })); const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3)); const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3)); n = gradInput2.numel(); // AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] { //kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::globalContext().getCurrentCUDAStream() >>>( kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput2.data<float>(), gradInput2_size, gradInput2_stride, kernel_size ); // })); // TODO: Use the ATen equivalent to get last error // THCudaCheck(cudaGetLastError()); }
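// Illustrative sketch (not part of the extension above): one way to call the forward entry
// point from C++ once this file is built into a CUDA extension. Tensor shapes follow what the
// kernel indexes (input1: N x C x H x W image, input2: N x 2 x H x W flow, output the same
// shape as input1); the build/link setup itself is assumed and not shown here.
#include <cstdio>
#include <ATen/ATen.h>

void resample2d_kernel_forward(at::Tensor& input1, at::Tensor& input2,
                               at::Tensor& output, int kernel_size);  // defined above

at::Tensor resample2d_forward_example() {
  auto opts = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
  at::Tensor image = at::rand({1, 3, 8, 8}, opts);    // N x C x H x W
  at::Tensor flow  = at::zeros({1, 2, 8, 8}, opts);   // zero flow: output should equal image
  at::Tensor out   = at::zeros({1, 3, 8, 8}, opts);
  resample2d_kernel_forward(image, flow, out, /*kernel_size=*/1);
  return out;
}

int main() {
  // Requires a CUDA device; with zero flow the checksum matches image.sum().
  at::Tensor out = resample2d_forward_example();
  std::printf("forward output checksum: %f\n", out.sum().item<float>());
  return 0;
}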
c0be030d54bf00d7b62041e33b093fc0ff9c0799.hip
// !!! This is a file automatically generated by hipify!!! /* !===================================================================== ! ! S p e c f e m 3 D V e r s i o n 2 . 1 ! --------------------------------------- ! ! Main authors: Dimitri Komatitsch and Jeroen Tromp ! Princeton University, USA and CNRS / INRIA / University of Pau ! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau ! July 2012 ! ! This program is free software; you can redistribute it and/or modify ! it under the terms of the GNU General Public License as published by ! the Free Software Foundation; either version 2 of the License, or ! (at your option) any later version. ! ! This program is distributed in the hope that it will be useful, ! but WITHOUT ANY WARRANTY; without even the implied warranty of ! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ! GNU General Public License for more details. ! ! You should have received a copy of the GNU General Public License along ! with this program; if not, write to the Free Software Foundation, Inc., ! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ! !===================================================================== */ #include <stdio.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <sys/time.h> #include <sys/resource.h> #include "config.h" #include "mesh_constants_cuda.h" /* ----------------------------------------------------------------------------------------------- */ __global__ void compute_stacey_elastic_kernel(realw* veloc, realw* accel, int* abs_boundary_ispec, int* abs_boundary_ijk, realw* abs_boundary_normal, realw* abs_boundary_jacobian2Dw, int* ibool, realw* rho_vp, realw* rho_vs, int* ispec_is_inner, int* ispec_is_elastic, int phase_is_inner, int SIMULATION_TYPE, int SAVE_FORWARD, int num_abs_boundary_faces, realw* b_absorb_field) { int igll = threadIdx.x; // tx int iface = blockIdx.x + gridDim.x*blockIdx.y; // bx int i,j,k,iglob,ispec; realw vx,vy,vz,vn; realw nx,ny,nz; realw rho_vp_temp,rho_vs_temp; realw tx,ty,tz; realw jacobianw; // don't compute points outside NGLLSQUARE==NGLL2==25 // way 2: no further check needed since blocksize = 25 if( iface < num_abs_boundary_faces){ //if(igll < NGLL2 && iface < num_abs_boundary_faces) { // "-1" from index values to convert from Fortran-> C indexing ispec = abs_boundary_ispec[iface]-1; if(ispec_is_inner[ispec] == phase_is_inner && ispec_is_elastic[ispec] ) { i = abs_boundary_ijk[INDEX3(NDIM,NGLL2,0,igll,iface)]-1; j = abs_boundary_ijk[INDEX3(NDIM,NGLL2,1,igll,iface)]-1; k = abs_boundary_ijk[INDEX3(NDIM,NGLL2,2,igll,iface)]-1; iglob = ibool[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]-1; // gets associated velocity vx = veloc[iglob*3+0]; vy = veloc[iglob*3+1]; vz = veloc[iglob*3+2]; // gets associated normal nx = abs_boundary_normal[INDEX3(NDIM,NGLL2,0,igll,iface)]; ny = abs_boundary_normal[INDEX3(NDIM,NGLL2,1,igll,iface)]; nz = abs_boundary_normal[INDEX3(NDIM,NGLL2,2,igll,iface)]; // // velocity component in normal direction (normal points out of element) vn = vx*nx + vy*ny + vz*nz; rho_vp_temp = rho_vp[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]; rho_vs_temp = rho_vs[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]; tx = rho_vp_temp*vn*nx + rho_vs_temp*(vx-vn*nx); ty = rho_vp_temp*vn*ny + rho_vs_temp*(vy-vn*ny); tz = rho_vp_temp*vn*nz + rho_vs_temp*(vz-vn*nz); jacobianw = abs_boundary_jacobian2Dw[INDEX2(NGLL2,igll,iface)]; atomicAdd(&accel[iglob*3],-tx*jacobianw); atomicAdd(&accel[iglob*3+1],-ty*jacobianw); 
atomicAdd(&accel[iglob*3+2],-tz*jacobianw); if(SAVE_FORWARD && SIMULATION_TYPE == 1) { b_absorb_field[INDEX3(NDIM,NGLL2,0,igll,iface)] = tx*jacobianw; b_absorb_field[INDEX3(NDIM,NGLL2,1,igll,iface)] = ty*jacobianw; b_absorb_field[INDEX3(NDIM,NGLL2,2,igll,iface)] = tz*jacobianw; } // SIMULATION_TYPE } } // num_abs_boundary_faces } /* ----------------------------------------------------------------------------------------------- */ __global__ void compute_stacey_elastic_sim3_kernel(int* abs_boundary_ispec, int* abs_boundary_ijk, int* ibool, int* ispec_is_inner, int* ispec_is_elastic, int phase_is_inner, int num_abs_boundary_faces, realw* b_accel, realw* b_absorb_field) { int igll = threadIdx.x; // tx int iface = blockIdx.x + gridDim.x*blockIdx.y; // bx int i,j,k,iglob,ispec; // don't compute points outside NGLLSQUARE==NGLL2==25 // way 2: no further check needed since blocksize = 25 if( iface < num_abs_boundary_faces){ //if(igll < NGLL2 && iface < num_abs_boundary_faces) { // "-1" from index values to convert from Fortran-> C indexing ispec = abs_boundary_ispec[iface]-1; if(ispec_is_inner[ispec] == phase_is_inner && ispec_is_elastic[ispec] ) { i = abs_boundary_ijk[INDEX3(NDIM,NGLL2,0,igll,iface)]-1; j = abs_boundary_ijk[INDEX3(NDIM,NGLL2,1,igll,iface)]-1; k = abs_boundary_ijk[INDEX3(NDIM,NGLL2,2,igll,iface)]-1; iglob = ibool[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]-1; atomicAdd(&b_accel[iglob*3 ],-b_absorb_field[INDEX3(NDIM,NGLL2,0,igll,iface)]); atomicAdd(&b_accel[iglob*3+1],-b_absorb_field[INDEX3(NDIM,NGLL2,1,igll,iface)]); atomicAdd(&b_accel[iglob*3+2],-b_absorb_field[INDEX3(NDIM,NGLL2,2,igll,iface)]); } } // num_abs_boundary_faces } /* ----------------------------------------------------------------------------------------------- */ extern "C" void FC_FUNC_(compute_stacey_viscoelastic_cuda, COMPUTE_STACEY_VISCOELASTIC_CUDA)(long* Mesh_pointer, int* phase_is_innerf, realw* b_absorb_field) { TRACE("\tcompute_stacey_viscoelastic_cuda"); Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container // checks if anything to do if( mp->d_num_abs_boundary_faces == 0 ) return; int phase_is_inner = *phase_is_innerf; // way 1 // > NGLLSQUARE==NGLL2==25, but we handle this inside kernel //int blocksize = 32; // way 2: seems sligthly faster // > NGLLSQUARE==NGLL2==25, no further check inside kernel int blocksize = NGLL2; int num_blocks_x, num_blocks_y; get_blocks_xy(mp->d_num_abs_boundary_faces,&num_blocks_x,&num_blocks_y); dim3 grid(num_blocks_x,num_blocks_y); dim3 threads(blocksize,1,1); if(mp->simulation_type == 3 ) { // reading is done in fortran routine print_CUDA_error_if_any(hipMemcpy(mp->d_b_absorb_field,b_absorb_field, mp->d_b_reclen_field,hipMemcpyHostToDevice),7700); } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("between cudamemcpy and compute_stacey_elastic_kernel"); #endif hipLaunchKernelGGL(( compute_stacey_elastic_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_veloc, mp->d_accel, mp->d_abs_boundary_ispec, mp->d_abs_boundary_ijk, mp->d_abs_boundary_normal, mp->d_abs_boundary_jacobian2Dw, mp->d_ibool, mp->d_rho_vp, mp->d_rho_vs, mp->d_ispec_is_inner, mp->d_ispec_is_elastic, phase_is_inner, mp->simulation_type, mp->save_forward, mp->d_num_abs_boundary_faces, mp->d_b_absorb_field); // adjoint simulations if(mp->simulation_type == 3 ){ hipLaunchKernelGGL(( compute_stacey_elastic_sim3_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_abs_boundary_ispec, mp->d_abs_boundary_ijk, mp->d_ibool, mp->d_ispec_is_inner, 
mp->d_ispec_is_elastic, phase_is_inner, mp->d_num_abs_boundary_faces, mp->d_b_accel, mp->d_b_absorb_field); } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("compute_stacey_elastic_kernel"); #endif // ! adjoint simulations: stores absorbed wavefield part // if (mp->simulation_type == 1 .and. SAVE_FORWARD .and. num_abs_boundary_faces > 0 ) & // write(IOABS,rec=it) b_reclen_field,b_absorb_field,b_reclen_field if(mp->simulation_type == 1 && mp->save_forward ) { // explicitly wait until compute stream is done // (hipMemcpy implicitly synchronizes all other cuda operations) hipStreamSynchronize(mp->compute_stream); // copies absorb_field values to CPU print_CUDA_error_if_any(hipMemcpy(b_absorb_field,mp->d_b_absorb_field, mp->d_b_reclen_field,hipMemcpyDeviceToHost),7701); // writing is done in fortran routine } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("after compute_stacey_elastic after cudamemcpy"); #endif }
c0be030d54bf00d7b62041e33b093fc0ff9c0799.cu
/* !===================================================================== ! ! S p e c f e m 3 D V e r s i o n 2 . 1 ! --------------------------------------- ! ! Main authors: Dimitri Komatitsch and Jeroen Tromp ! Princeton University, USA and CNRS / INRIA / University of Pau ! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau ! July 2012 ! ! This program is free software; you can redistribute it and/or modify ! it under the terms of the GNU General Public License as published by ! the Free Software Foundation; either version 2 of the License, or ! (at your option) any later version. ! ! This program is distributed in the hope that it will be useful, ! but WITHOUT ANY WARRANTY; without even the implied warranty of ! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ! GNU General Public License for more details. ! ! You should have received a copy of the GNU General Public License along ! with this program; if not, write to the Free Software Foundation, Inc., ! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ! !===================================================================== */ #include <stdio.h> #include <cuda.h> #include <cublas.h> #include <sys/time.h> #include <sys/resource.h> #include "config.h" #include "mesh_constants_cuda.h" /* ----------------------------------------------------------------------------------------------- */ __global__ void compute_stacey_elastic_kernel(realw* veloc, realw* accel, int* abs_boundary_ispec, int* abs_boundary_ijk, realw* abs_boundary_normal, realw* abs_boundary_jacobian2Dw, int* ibool, realw* rho_vp, realw* rho_vs, int* ispec_is_inner, int* ispec_is_elastic, int phase_is_inner, int SIMULATION_TYPE, int SAVE_FORWARD, int num_abs_boundary_faces, realw* b_absorb_field) { int igll = threadIdx.x; // tx int iface = blockIdx.x + gridDim.x*blockIdx.y; // bx int i,j,k,iglob,ispec; realw vx,vy,vz,vn; realw nx,ny,nz; realw rho_vp_temp,rho_vs_temp; realw tx,ty,tz; realw jacobianw; // don't compute points outside NGLLSQUARE==NGLL2==25 // way 2: no further check needed since blocksize = 25 if( iface < num_abs_boundary_faces){ //if(igll < NGLL2 && iface < num_abs_boundary_faces) { // "-1" from index values to convert from Fortran-> C indexing ispec = abs_boundary_ispec[iface]-1; if(ispec_is_inner[ispec] == phase_is_inner && ispec_is_elastic[ispec] ) { i = abs_boundary_ijk[INDEX3(NDIM,NGLL2,0,igll,iface)]-1; j = abs_boundary_ijk[INDEX3(NDIM,NGLL2,1,igll,iface)]-1; k = abs_boundary_ijk[INDEX3(NDIM,NGLL2,2,igll,iface)]-1; iglob = ibool[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]-1; // gets associated velocity vx = veloc[iglob*3+0]; vy = veloc[iglob*3+1]; vz = veloc[iglob*3+2]; // gets associated normal nx = abs_boundary_normal[INDEX3(NDIM,NGLL2,0,igll,iface)]; ny = abs_boundary_normal[INDEX3(NDIM,NGLL2,1,igll,iface)]; nz = abs_boundary_normal[INDEX3(NDIM,NGLL2,2,igll,iface)]; // // velocity component in normal direction (normal points out of element) vn = vx*nx + vy*ny + vz*nz; rho_vp_temp = rho_vp[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]; rho_vs_temp = rho_vs[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]; tx = rho_vp_temp*vn*nx + rho_vs_temp*(vx-vn*nx); ty = rho_vp_temp*vn*ny + rho_vs_temp*(vy-vn*ny); tz = rho_vp_temp*vn*nz + rho_vs_temp*(vz-vn*nz); jacobianw = abs_boundary_jacobian2Dw[INDEX2(NGLL2,igll,iface)]; atomicAdd(&accel[iglob*3],-tx*jacobianw); atomicAdd(&accel[iglob*3+1],-ty*jacobianw); atomicAdd(&accel[iglob*3+2],-tz*jacobianw); if(SAVE_FORWARD && SIMULATION_TYPE == 1) { 
b_absorb_field[INDEX3(NDIM,NGLL2,0,igll,iface)] = tx*jacobianw; b_absorb_field[INDEX3(NDIM,NGLL2,1,igll,iface)] = ty*jacobianw; b_absorb_field[INDEX3(NDIM,NGLL2,2,igll,iface)] = tz*jacobianw; } // SIMULATION_TYPE } } // num_abs_boundary_faces } /* ----------------------------------------------------------------------------------------------- */ __global__ void compute_stacey_elastic_sim3_kernel(int* abs_boundary_ispec, int* abs_boundary_ijk, int* ibool, int* ispec_is_inner, int* ispec_is_elastic, int phase_is_inner, int num_abs_boundary_faces, realw* b_accel, realw* b_absorb_field) { int igll = threadIdx.x; // tx int iface = blockIdx.x + gridDim.x*blockIdx.y; // bx int i,j,k,iglob,ispec; // don't compute points outside NGLLSQUARE==NGLL2==25 // way 2: no further check needed since blocksize = 25 if( iface < num_abs_boundary_faces){ //if(igll < NGLL2 && iface < num_abs_boundary_faces) { // "-1" from index values to convert from Fortran-> C indexing ispec = abs_boundary_ispec[iface]-1; if(ispec_is_inner[ispec] == phase_is_inner && ispec_is_elastic[ispec] ) { i = abs_boundary_ijk[INDEX3(NDIM,NGLL2,0,igll,iface)]-1; j = abs_boundary_ijk[INDEX3(NDIM,NGLL2,1,igll,iface)]-1; k = abs_boundary_ijk[INDEX3(NDIM,NGLL2,2,igll,iface)]-1; iglob = ibool[INDEX4(NGLLX,NGLLX,NGLLX,i,j,k,ispec)]-1; atomicAdd(&b_accel[iglob*3 ],-b_absorb_field[INDEX3(NDIM,NGLL2,0,igll,iface)]); atomicAdd(&b_accel[iglob*3+1],-b_absorb_field[INDEX3(NDIM,NGLL2,1,igll,iface)]); atomicAdd(&b_accel[iglob*3+2],-b_absorb_field[INDEX3(NDIM,NGLL2,2,igll,iface)]); } } // num_abs_boundary_faces } /* ----------------------------------------------------------------------------------------------- */ extern "C" void FC_FUNC_(compute_stacey_viscoelastic_cuda, COMPUTE_STACEY_VISCOELASTIC_CUDA)(long* Mesh_pointer, int* phase_is_innerf, realw* b_absorb_field) { TRACE("\tcompute_stacey_viscoelastic_cuda"); Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container // checks if anything to do if( mp->d_num_abs_boundary_faces == 0 ) return; int phase_is_inner = *phase_is_innerf; // way 1 // > NGLLSQUARE==NGLL2==25, but we handle this inside kernel //int blocksize = 32; // way 2: seems sligthly faster // > NGLLSQUARE==NGLL2==25, no further check inside kernel int blocksize = NGLL2; int num_blocks_x, num_blocks_y; get_blocks_xy(mp->d_num_abs_boundary_faces,&num_blocks_x,&num_blocks_y); dim3 grid(num_blocks_x,num_blocks_y); dim3 threads(blocksize,1,1); if(mp->simulation_type == 3 ) { // reading is done in fortran routine print_CUDA_error_if_any(cudaMemcpy(mp->d_b_absorb_field,b_absorb_field, mp->d_b_reclen_field,cudaMemcpyHostToDevice),7700); } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("between cudamemcpy and compute_stacey_elastic_kernel"); #endif compute_stacey_elastic_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_veloc, mp->d_accel, mp->d_abs_boundary_ispec, mp->d_abs_boundary_ijk, mp->d_abs_boundary_normal, mp->d_abs_boundary_jacobian2Dw, mp->d_ibool, mp->d_rho_vp, mp->d_rho_vs, mp->d_ispec_is_inner, mp->d_ispec_is_elastic, phase_is_inner, mp->simulation_type, mp->save_forward, mp->d_num_abs_boundary_faces, mp->d_b_absorb_field); // adjoint simulations if(mp->simulation_type == 3 ){ compute_stacey_elastic_sim3_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_abs_boundary_ispec, mp->d_abs_boundary_ijk, mp->d_ibool, mp->d_ispec_is_inner, mp->d_ispec_is_elastic, phase_is_inner, mp->d_num_abs_boundary_faces, mp->d_b_accel, mp->d_b_absorb_field); } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING 
exit_on_cuda_error("compute_stacey_elastic_kernel"); #endif // ! adjoint simulations: stores absorbed wavefield part // if (mp->simulation_type == 1 .and. SAVE_FORWARD .and. num_abs_boundary_faces > 0 ) & // write(IOABS,rec=it) b_reclen_field,b_absorb_field,b_reclen_field if(mp->simulation_type == 1 && mp->save_forward ) { // explicitly wait until compute stream is done // (cudaMemcpy implicitly synchronizes all other cuda operations) cudaStreamSynchronize(mp->compute_stream); // copies absorb_field values to CPU print_CUDA_error_if_any(cudaMemcpy(b_absorb_field,mp->d_b_absorb_field, mp->d_b_reclen_field,cudaMemcpyDeviceToHost),7701); // writing is done in fortran routine } #ifdef ENABLE_VERY_SLOW_ERROR_CHECKING exit_on_cuda_error("after compute_stacey_elastic after cudamemcpy"); #endif }
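Note on the pair above: the kernels themselves are identical in the .hip and .cu files; the visible difference is that hipify rewrites every CUDA triple-chevron launch into a hipLaunchKernelGGL call with explicit dim3 arguments. The toy kernel below is only an illustrative sketch of that mapping; scale_kernel and its sizes are made up for the example and are not part of SPECFEM3D.

// Minimal sketch (hypothetical kernel, not from the sources above): a CUDA
// launch and, in the comment, the form hipify would rewrite it into.
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;   // one element per thread, bounds-checked
}

int main() {
  const int n = 1024;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));

  dim3 grid((n + 255) / 256), threads(256);
  // CUDA form; hipify expresses the same launch as
  //   hipLaunchKernelGGL(scale_kernel, grid, threads, 0, 0, d, 2.0f, n);
  scale_kernel<<<grid, threads>>>(d, 2.0f, n);
  cudaDeviceSynchronize();

  cudaFree(d);
  return 0;
}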
96a65b2e13a8d16dac7ab1e5cd0fdde0d3ccef07.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <algorithm> #include <cuda_utils.cuh> #include <random/permute.cuh> #include <random/rng.cuh> #include <vector> #include "test_utils.h" namespace MLCommon { namespace Random { template <typename T> struct PermInputs { int N, D; bool needPerms, needShuffle, rowMajor; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const PermInputs<T> &dims) { return os; } template <typename T> class PermTest : public ::testing::TestWithParam<PermInputs<T>> { protected: void SetUp() override { CUDA_CHECK(hipStreamCreate(&stream)); params = ::testing::TestWithParam<PermInputs<T>>::GetParam(); // forcefully set needPerms, since we need it for unit-testing! if (params.needShuffle) { params.needPerms = true; } raft::random::Rng r(params.seed); int N = params.N; int D = params.D; int len = N * D; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); if (params.needPerms) raft::allocate(outPerms, N); else outPerms = nullptr; if (params.needShuffle) { raft::allocate(in, len); raft::allocate(out, len); r.uniform(in, len, T(-1.0), T(1.0), stream); } else { in = out = nullptr; } permute(outPerms, out, in, D, N, params.rowMajor, stream); CUDA_CHECK(hipStreamSynchronize(stream)); } void TearDown() override { if (params.needPerms) CUDA_CHECK(hipFree(outPerms)); if (params.needShuffle) { CUDA_CHECK(hipFree(in)); CUDA_CHECK(hipFree(out)); } CUDA_CHECK(hipStreamDestroy(stream)); } protected: PermInputs<T> params; T *in, *out; int *outPerms; hipStream_t stream; }; template <typename T, typename L> ::testing::AssertionResult devArrMatchRange(const T *actual, size_t size, T start, L eq_compare, bool doSort = true, hipStream_t stream = 0) { std::vector<T> act_h(size); raft::update_host<T>(&(act_h[0]), actual, size, stream); CUDA_CHECK(hipStreamSynchronize(stream)); if (doSort) std::sort(act_h.begin(), act_h.end()); for (size_t i(0); i < size; ++i) { auto act = act_h[i]; auto expected = start + i; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i; } } return ::testing::AssertionSuccess(); } template <typename T, typename L> ::testing::AssertionResult devArrMatchShuffle(const int *perms, const T *out, const T *in, int D, int N, bool rowMajor, L eq_compare, hipStream_t stream = 0) { std::vector<int> h_perms(N); raft::update_host<int>(&(h_perms[0]), perms, N, stream); std::vector<T> h_out(N * D), h_in(N * D); raft::update_host<T>(&(h_out[0]), out, N * D, stream); raft::update_host<T>(&(h_in[0]), in, N * D, stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int i = 0; i < N; ++i) { for (int j = 0; j < D; ++j) { int outPos = rowMajor ? i * D + j : j * N + i; int inPos = rowMajor ? 
h_perms[i] * D + j : j * N + h_perms[i]; auto act = h_out[outPos]; auto expected = h_in[inPos]; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i << ", " << j; } } } return ::testing::AssertionSuccess(); } const std::vector<PermInputs<float>> inputsf = { // only generate permutations {32, 8, true, false, true, 1234ULL}, {32, 8, true, false, true, 1234567890ULL}, {1024, 32, true, false, true, 1234ULL}, {1024, 32, true, false, true, 1234567890ULL}, {2 * 1024, 32, true, false, true, 1234ULL}, {2 * 1024, 32, true, false, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, false, true, 1234ULL}, {2 * 1024 + 500, 32, true, false, true, 1234567890ULL}, {100000, 32, true, false, true, 1234ULL}, {100000, 32, true, false, true, 1234567890ULL}, {100001, 33, true, false, true, 1234567890ULL}, // permute and shuffle the data row major {32, 8, true, true, true, 1234ULL}, {32, 8, true, true, true, 1234567890ULL}, {1024, 32, true, true, true, 1234ULL}, {1024, 32, true, true, true, 1234567890ULL}, {2 * 1024, 32, true, true, true, 1234ULL}, {2 * 1024, 32, true, true, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, true, 1234ULL}, {2 * 1024 + 500, 32, true, true, true, 1234567890ULL}, {100000, 32, true, true, true, 1234ULL}, {100000, 32, true, true, true, 1234567890ULL}, {100001, 31, true, true, true, 1234567890ULL}, // permute and shuffle the data column major {32, 8, true, true, false, 1234ULL}, {32, 8, true, true, false, 1234567890ULL}, {1024, 32, true, true, false, 1234ULL}, {1024, 32, true, true, false, 1234567890ULL}, {2 * 1024, 32, true, true, false, 1234ULL}, {2 * 1024, 32, true, true, false, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, false, 1234ULL}, {2 * 1024 + 500, 32, true, true, false, 1234567890ULL}, {100000, 32, true, true, false, 1234ULL}, {100000, 32, true, true, false, 1234567890ULL}, {100001, 33, true, true, false, 1234567890ULL}}; typedef PermTest<float> PermTestF; TEST_P(PermTestF, Result) { if (params.needPerms) { ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, raft::Compare<int>())); } if (params.needShuffle) { ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N, params.rowMajor, raft::Compare<float>())); } } INSTANTIATE_TEST_CASE_P(PermTests, PermTestF, ::testing::ValuesIn(inputsf)); const std::vector<PermInputs<double>> inputsd = { // only generate permutations {32, 8, true, false, true, 1234ULL}, {32, 8, true, false, true, 1234567890ULL}, {1024, 32, true, false, true, 1234ULL}, {1024, 32, true, false, true, 1234567890ULL}, {2 * 1024, 32, true, false, true, 1234ULL}, {2 * 1024, 32, true, false, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, false, true, 1234ULL}, {2 * 1024 + 500, 32, true, false, true, 1234567890ULL}, {100000, 32, true, false, true, 1234ULL}, {100000, 32, true, false, true, 1234567890ULL}, {100001, 33, true, false, true, 1234567890ULL}, // permute and shuffle the data row major {32, 8, true, true, true, 1234ULL}, {32, 8, true, true, true, 1234567890ULL}, {1024, 32, true, true, true, 1234ULL}, {1024, 32, true, true, true, 1234567890ULL}, {2 * 1024, 32, true, true, true, 1234ULL}, {2 * 1024, 32, true, true, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, true, 1234ULL}, {2 * 1024 + 500, 32, true, true, true, 1234567890ULL}, {100000, 32, true, true, true, 1234ULL}, {100000, 32, true, true, true, 1234567890ULL}, {100001, 31, true, true, true, 1234567890ULL}, // permute and shuffle the data column major {32, 8, true, true, false, 1234ULL}, {32, 8, 
true, true, false, 1234567890ULL}, {1024, 32, true, true, false, 1234ULL}, {1024, 32, true, true, false, 1234567890ULL}, {2 * 1024, 32, true, true, false, 1234ULL}, {2 * 1024, 32, true, true, false, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, false, 1234ULL}, {2 * 1024 + 500, 32, true, true, false, 1234567890ULL}, {100000, 32, true, true, false, 1234ULL}, {100000, 32, true, true, false, 1234567890ULL}, {100001, 33, true, true, false, 1234567890ULL}}; typedef PermTest<double> PermTestD; TEST_P(PermTestD, Result) { if (params.needPerms) { ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, raft::Compare<int>())); } if (params.needShuffle) { ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N, params.rowMajor, raft::Compare<double>())); } } INSTANTIATE_TEST_CASE_P(PermTests, PermTestD, ::testing::ValuesIn(inputsd)); } // end namespace Random } // end namespace MLCommon
96a65b2e13a8d16dac7ab1e5cd0fdde0d3ccef07.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <algorithm> #include <cuda_utils.cuh> #include <random/permute.cuh> #include <random/rng.cuh> #include <vector> #include "test_utils.h" namespace MLCommon { namespace Random { template <typename T> struct PermInputs { int N, D; bool needPerms, needShuffle, rowMajor; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const PermInputs<T> &dims) { return os; } template <typename T> class PermTest : public ::testing::TestWithParam<PermInputs<T>> { protected: void SetUp() override { CUDA_CHECK(cudaStreamCreate(&stream)); params = ::testing::TestWithParam<PermInputs<T>>::GetParam(); // forcefully set needPerms, since we need it for unit-testing! if (params.needShuffle) { params.needPerms = true; } raft::random::Rng r(params.seed); int N = params.N; int D = params.D; int len = N * D; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); if (params.needPerms) raft::allocate(outPerms, N); else outPerms = nullptr; if (params.needShuffle) { raft::allocate(in, len); raft::allocate(out, len); r.uniform(in, len, T(-1.0), T(1.0), stream); } else { in = out = nullptr; } permute(outPerms, out, in, D, N, params.rowMajor, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } void TearDown() override { if (params.needPerms) CUDA_CHECK(cudaFree(outPerms)); if (params.needShuffle) { CUDA_CHECK(cudaFree(in)); CUDA_CHECK(cudaFree(out)); } CUDA_CHECK(cudaStreamDestroy(stream)); } protected: PermInputs<T> params; T *in, *out; int *outPerms; cudaStream_t stream; }; template <typename T, typename L> ::testing::AssertionResult devArrMatchRange(const T *actual, size_t size, T start, L eq_compare, bool doSort = true, cudaStream_t stream = 0) { std::vector<T> act_h(size); raft::update_host<T>(&(act_h[0]), actual, size, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); if (doSort) std::sort(act_h.begin(), act_h.end()); for (size_t i(0); i < size; ++i) { auto act = act_h[i]; auto expected = start + i; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i; } } return ::testing::AssertionSuccess(); } template <typename T, typename L> ::testing::AssertionResult devArrMatchShuffle(const int *perms, const T *out, const T *in, int D, int N, bool rowMajor, L eq_compare, cudaStream_t stream = 0) { std::vector<int> h_perms(N); raft::update_host<int>(&(h_perms[0]), perms, N, stream); std::vector<T> h_out(N * D), h_in(N * D); raft::update_host<T>(&(h_out[0]), out, N * D, stream); raft::update_host<T>(&(h_in[0]), in, N * D, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int i = 0; i < N; ++i) { for (int j = 0; j < D; ++j) { int outPos = rowMajor ? i * D + j : j * N + i; int inPos = rowMajor ? 
h_perms[i] * D + j : j * N + h_perms[i]; auto act = h_out[outPos]; auto expected = h_in[inPos]; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i << ", " << j; } } } return ::testing::AssertionSuccess(); } const std::vector<PermInputs<float>> inputsf = { // only generate permutations {32, 8, true, false, true, 1234ULL}, {32, 8, true, false, true, 1234567890ULL}, {1024, 32, true, false, true, 1234ULL}, {1024, 32, true, false, true, 1234567890ULL}, {2 * 1024, 32, true, false, true, 1234ULL}, {2 * 1024, 32, true, false, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, false, true, 1234ULL}, {2 * 1024 + 500, 32, true, false, true, 1234567890ULL}, {100000, 32, true, false, true, 1234ULL}, {100000, 32, true, false, true, 1234567890ULL}, {100001, 33, true, false, true, 1234567890ULL}, // permute and shuffle the data row major {32, 8, true, true, true, 1234ULL}, {32, 8, true, true, true, 1234567890ULL}, {1024, 32, true, true, true, 1234ULL}, {1024, 32, true, true, true, 1234567890ULL}, {2 * 1024, 32, true, true, true, 1234ULL}, {2 * 1024, 32, true, true, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, true, 1234ULL}, {2 * 1024 + 500, 32, true, true, true, 1234567890ULL}, {100000, 32, true, true, true, 1234ULL}, {100000, 32, true, true, true, 1234567890ULL}, {100001, 31, true, true, true, 1234567890ULL}, // permute and shuffle the data column major {32, 8, true, true, false, 1234ULL}, {32, 8, true, true, false, 1234567890ULL}, {1024, 32, true, true, false, 1234ULL}, {1024, 32, true, true, false, 1234567890ULL}, {2 * 1024, 32, true, true, false, 1234ULL}, {2 * 1024, 32, true, true, false, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, false, 1234ULL}, {2 * 1024 + 500, 32, true, true, false, 1234567890ULL}, {100000, 32, true, true, false, 1234ULL}, {100000, 32, true, true, false, 1234567890ULL}, {100001, 33, true, true, false, 1234567890ULL}}; typedef PermTest<float> PermTestF; TEST_P(PermTestF, Result) { if (params.needPerms) { ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, raft::Compare<int>())); } if (params.needShuffle) { ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N, params.rowMajor, raft::Compare<float>())); } } INSTANTIATE_TEST_CASE_P(PermTests, PermTestF, ::testing::ValuesIn(inputsf)); const std::vector<PermInputs<double>> inputsd = { // only generate permutations {32, 8, true, false, true, 1234ULL}, {32, 8, true, false, true, 1234567890ULL}, {1024, 32, true, false, true, 1234ULL}, {1024, 32, true, false, true, 1234567890ULL}, {2 * 1024, 32, true, false, true, 1234ULL}, {2 * 1024, 32, true, false, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, false, true, 1234ULL}, {2 * 1024 + 500, 32, true, false, true, 1234567890ULL}, {100000, 32, true, false, true, 1234ULL}, {100000, 32, true, false, true, 1234567890ULL}, {100001, 33, true, false, true, 1234567890ULL}, // permute and shuffle the data row major {32, 8, true, true, true, 1234ULL}, {32, 8, true, true, true, 1234567890ULL}, {1024, 32, true, true, true, 1234ULL}, {1024, 32, true, true, true, 1234567890ULL}, {2 * 1024, 32, true, true, true, 1234ULL}, {2 * 1024, 32, true, true, true, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, true, 1234ULL}, {2 * 1024 + 500, 32, true, true, true, 1234567890ULL}, {100000, 32, true, true, true, 1234ULL}, {100000, 32, true, true, true, 1234567890ULL}, {100001, 31, true, true, true, 1234567890ULL}, // permute and shuffle the data column major {32, 8, true, true, false, 1234ULL}, {32, 8, 
true, true, false, 1234567890ULL}, {1024, 32, true, true, false, 1234ULL}, {1024, 32, true, true, false, 1234567890ULL}, {2 * 1024, 32, true, true, false, 1234ULL}, {2 * 1024, 32, true, true, false, 1234567890ULL}, {2 * 1024 + 500, 32, true, true, false, 1234ULL}, {2 * 1024 + 500, 32, true, true, false, 1234567890ULL}, {100000, 32, true, true, false, 1234ULL}, {100000, 32, true, true, false, 1234567890ULL}, {100001, 33, true, true, false, 1234567890ULL}}; typedef PermTest<double> PermTestD; TEST_P(PermTestD, Result) { if (params.needPerms) { ASSERT_TRUE(devArrMatchRange(outPerms, params.N, 0, raft::Compare<int>())); } if (params.needShuffle) { ASSERT_TRUE(devArrMatchShuffle(outPerms, out, in, params.D, params.N, params.rowMajor, raft::Compare<double>())); } } INSTANTIATE_TEST_CASE_P(PermTests, PermTestD, ::testing::ValuesIn(inputsd)); } // end namespace Random } // end namespace MLCommon
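The test pair above validates the generated permutation in devArrMatchRange by copying it to the host, sorting it, and comparing against the range 0..N-1. The helper below is a minimal host-only sketch of that check; it is a hypothetical standalone function written for illustration, not part of the test file.

// Illustrative sketch: does a host array hold a permutation of 0..N-1?
#include <algorithm>
#include <cstdio>
#include <vector>

bool is_permutation_of_range(std::vector<int> perm) {
  std::sort(perm.begin(), perm.end());                     // mirrors the doSort branch
  for (size_t i = 0; i < perm.size(); ++i) {
    if (perm[i] != static_cast<int>(i)) return false;      // expected = start + i, start = 0
  }
  return true;
}

int main() {
  std::vector<int> perm = {3, 0, 2, 1};
  std::printf("valid permutation: %d\n", is_permutation_of_range(perm) ? 1 : 0);
  return 0;
}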
37efde53ce763c631bfa8a1f539eee3e8c555482.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/softsign_op.h"

#include <algorithm>
#include <functional>

#include "caffe2/core/context_gpu.h"

namespace caffe2 {

namespace {

template <typename T>
inline __host__ __device__ T SquareCUDA(const T x) {
  return x * x;
}

template <typename T>
inline __device__ T typed_abs(T x);

template <>
inline __device__ float typed_abs(float x) {
  return fabsf(x);
}

// Avoid compiler warning. <double> specification currently not used.
// template <>
// inline __device__ double typed_abs(double x) {
//   return fabs(x);
// }

template <typename T>
__global__ void SoftsignCUDAKernel(const int N, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    Y[i] = __ldg(X + i) / (T(1) + typed_abs(__ldg(X + i)));
#else
    Y[i] = X[i] / (T(1) + typed_abs(X[i]));
#endif
  }
}

template <typename T>
__global__ void SoftsignGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    dX[i] = __ldg(dY + i) / SquareCUDA(T(1) + typed_abs(__ldg(X + i)));
#else
    dX[i] = dY[i] / SquareCUDA(T(1) + typed_abs(X[i]));
#endif
  }
}

} // namespace

template <>
template <typename T>
bool SoftsignFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
  hipLaunchKernelGGL(( SoftsignCUDAKernel<T>)
      , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
      N, X, Y);
  return true;
}

template <>
template <typename T>
bool SoftsignGradientFunctor<CUDAContext>::Forward(
    const std::vector<int>& X_dims,
    const std::vector<int>& /* dY_dims */,
    const T* X,
    const T* dY,
    T* dX,
    CUDAContext* context) const {
  const int size = std::accumulate(
      X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
  hipLaunchKernelGGL(( SoftsignGradientCUDAKernel<T>)
      , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
      size, dY, X, dX);
  return true;
}

REGISTER_CUDA_OPERATOR(
    Softsign,
    UnaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SoftsignFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
    SoftsignGradient,
    BinaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SoftsignGradientFunctor<CUDAContext>>);

} // namespace caffe2
37efde53ce763c631bfa8a1f539eee3e8c555482.cu
#include "caffe2/operators/softsign_op.h"

#include <algorithm>
#include <functional>

#include "caffe2/core/context_gpu.h"

namespace caffe2 {

namespace {

template <typename T>
inline __host__ __device__ T SquareCUDA(const T x) {
  return x * x;
}

template <typename T>
inline __device__ T typed_abs(T x);

template <>
inline __device__ float typed_abs(float x) {
  return fabsf(x);
}

// Avoid compiler warning. <double> specification currently not used.
// template <>
// inline __device__ double typed_abs(double x) {
//   return fabs(x);
// }

template <typename T>
__global__ void SoftsignCUDAKernel(const int N, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    Y[i] = __ldg(X + i) / (T(1) + typed_abs(__ldg(X + i)));
#else
    Y[i] = X[i] / (T(1) + typed_abs(X[i]));
#endif
  }
}

template <typename T>
__global__ void SoftsignGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    dX[i] = __ldg(dY + i) / SquareCUDA(T(1) + typed_abs(__ldg(X + i)));
#else
    dX[i] = dY[i] / SquareCUDA(T(1) + typed_abs(X[i]));
#endif
  }
}

} // namespace

template <>
template <typename T>
bool SoftsignFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
  SoftsignCUDAKernel<T>
      <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, X, Y);
  return true;
}

template <>
template <typename T>
bool SoftsignGradientFunctor<CUDAContext>::Forward(
    const std::vector<int>& X_dims,
    const std::vector<int>& /* dY_dims */,
    const T* X,
    const T* dY,
    T* dX,
    CUDAContext* context) const {
  const int size = std::accumulate(
      X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
  SoftsignGradientCUDAKernel<T>
      <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, dY, X, dX);
  return true;
}

REGISTER_CUDA_OPERATOR(
    Softsign,
    UnaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SoftsignFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
    SoftsignGradient,
    BinaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SoftsignGradientFunctor<CUDAContext>>);

} // namespace caffe2
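Both kernels in the pair above implement the softsign activation y = x / (1 + |x|) and its gradient dX = dY / (1 + |x|)^2. The snippet below is a CPU-side sketch written here only to spell out that math; it is not part of Caffe2.

// Minimal CPU reference sketch of the softsign forward and backward formulas.
#include <cmath>
#include <cstdio>

float softsign(float x) { return x / (1.0f + std::fabs(x)); }

// Same form as SoftsignGradientCUDAKernel: dX = dY / (1 + |x|)^2.
float softsign_grad(float dy, float x) {
  const float d = 1.0f + std::fabs(x);
  return dy / (d * d);
}

int main() {
  const float x = -2.0f;
  std::printf("softsign(%.1f) = %.4f, gradient (with dY = 1) = %.4f\n",
              x, softsign(x), softsign_grad(1.0f, x));
  return 0;
}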
7cb2c5b0bb8e12e09c7178c5f67a3777d93672e1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "bench.h"

int measure_fma_lldr_coissue(bool verbose, int mhz)
{
    const size_t block_num = 8192;
    const size_t threads_per_block = 1024;
    const size_t fma_per_thread = 64 * 64;
    const long long total_fma = block_num * threads_per_block * fma_per_thread;

    float *p;
    hipMalloc(&p, sizeof(float) * block_num * threads_per_block);

    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, NULL);

    float scale = 1.0f;
    hipLaunchKernelGGL(( fma_lldr_coissue), dim3(block_num), dim3(threads_per_block), 0, 0, p, scale);
    int err = hipGetLastError();
    if (err != hipSuccess) {
        std::cerr << "failed to execute kernel" << std::endl;
    }

    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);

    float ellapse_ms = 0.0f;
    hipEventElapsedTime(&ellapse_ms, start, stop);

    hipFree(p);

    if (verbose) {
        std::cerr << ellapse_ms << " ms" << std::endl;
        std::cerr << static_cast<double>(ellapse_ms) * 1e-3 * mhz * 1e6 * 5 * 128 / total_fma << std::endl;
        std::cerr << 2.0 * (total_fma * 1.0e-9) / (ellapse_ms * 1e-3) << " gflops" << std::endl;
    }

    return 0;
}

int measure_fmax2_lldr_coissue(bool verbose, int mhz)
{
    const size_t block_num = 8192;
    const size_t threads_per_block = 1024;
    const size_t fma_per_thread = 64 * 64 * 2;
    const long long total_fma = block_num * threads_per_block * fma_per_thread;

    float *p;
    hipMalloc(&p, sizeof(float) * block_num * threads_per_block);

    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, NULL);

    float scale = 1.0f;
    hipLaunchKernelGGL(( fmax2_lldr_coissue), dim3(block_num), dim3(threads_per_block), 0, 0, p, scale, scale);
    int err = hipGetLastError();
    if (err != hipSuccess) {
        std::cerr << "failed to execute kernel" << std::endl;
    }

    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);

    float ellapse_ms = 0.0f;
    hipEventElapsedTime(&ellapse_ms, start, stop);

    hipFree(p);

    if (verbose) {
        std::cerr << ellapse_ms << " ms" << std::endl;
        std::cerr << static_cast<double>(ellapse_ms) * 1e-3 * mhz * 1e6 * 5 * 128 / total_fma << std::endl;
        std::cerr << 2.0 * (total_fma * 1.0e-9) / (ellapse_ms * 1e-3) << " gflops" << std::endl;
    }

    return 0;
}

int measure_fmax4_lldr_coissue(bool verbose, int mhz)
{
    const size_t block_num = 8192;
    const size_t threads_per_block = 1024;
    const size_t fma_per_thread = 64 * 64 * 4;
    const long long total_fma = block_num * threads_per_block * fma_per_thread;

    float *p;
    hipMalloc(&p, sizeof(float) * block_num * threads_per_block);

    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, NULL);

    float scale = 1.0f;
    hipLaunchKernelGGL(( fmax4_lldr_coissue), dim3(block_num), dim3(threads_per_block), 0, 0, p, scale, scale, scale, scale);
    int err = hipGetLastError();
    if (err != hipSuccess) {
        std::cerr << "failed to execute kernel" << std::endl;
    }

    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);

    float ellapse_ms = 0.0f;
    hipEventElapsedTime(&ellapse_ms, start, stop);

    hipFree(p);

    if (verbose) {
        std::cerr << ellapse_ms << " ms" << std::endl;
        std::cerr << static_cast<double>(ellapse_ms) * 1e-3 * mhz * 1e6 * 5 * 128 / total_fma << std::endl;
        std::cerr << 2.0 * (total_fma * 1.0e-9) / (ellapse_ms * 1e-3) << " gflops" << std::endl;
    }

    return 0;
}

#ifdef LOCAL_MAIN
int main(void)
{
    // measure_fmax2_lldr_coissue(true, 1176);
    measure_fmax4_lldr_coissue(true, 1176);
    return 0;
}
#endif
7cb2c5b0bb8e12e09c7178c5f67a3777d93672e1.cu
#include "common.h"
#include "bench.h"

int measure_fma_lldr_coissue(bool verbose, int mhz)
{
    const size_t block_num = 8192;
    const size_t threads_per_block = 1024;
    const size_t fma_per_thread = 64 * 64;
    const long long total_fma = block_num * threads_per_block * fma_per_thread;

    float *p;
    cudaMalloc(&p, sizeof(float) * block_num * threads_per_block);

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);

    float scale = 1.0f;
    fma_lldr_coissue<<<block_num, threads_per_block>>>(p, scale);
    int err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "failed to execute kernel" << std::endl;
    }

    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);

    float ellapse_ms = 0.0f;
    cudaEventElapsedTime(&ellapse_ms, start, stop);

    cudaFree(p);

    if (verbose) {
        std::cerr << ellapse_ms << " ms" << std::endl;
        std::cerr << static_cast<double>(ellapse_ms) * 1e-3 * mhz * 1e6 * 5 * 128 / total_fma << std::endl;
        std::cerr << 2.0 * (total_fma * 1.0e-9) / (ellapse_ms * 1e-3) << " gflops" << std::endl;
    }

    return 0;
}

int measure_fmax2_lldr_coissue(bool verbose, int mhz)
{
    const size_t block_num = 8192;
    const size_t threads_per_block = 1024;
    const size_t fma_per_thread = 64 * 64 * 2;
    const long long total_fma = block_num * threads_per_block * fma_per_thread;

    float *p;
    cudaMalloc(&p, sizeof(float) * block_num * threads_per_block);

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);

    float scale = 1.0f;
    fmax2_lldr_coissue<<<block_num, threads_per_block>>>(p, scale, scale);
    int err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "failed to execute kernel" << std::endl;
    }

    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);

    float ellapse_ms = 0.0f;
    cudaEventElapsedTime(&ellapse_ms, start, stop);

    cudaFree(p);

    if (verbose) {
        std::cerr << ellapse_ms << " ms" << std::endl;
        std::cerr << static_cast<double>(ellapse_ms) * 1e-3 * mhz * 1e6 * 5 * 128 / total_fma << std::endl;
        std::cerr << 2.0 * (total_fma * 1.0e-9) / (ellapse_ms * 1e-3) << " gflops" << std::endl;
    }

    return 0;
}

int measure_fmax4_lldr_coissue(bool verbose, int mhz)
{
    const size_t block_num = 8192;
    const size_t threads_per_block = 1024;
    const size_t fma_per_thread = 64 * 64 * 4;
    const long long total_fma = block_num * threads_per_block * fma_per_thread;

    float *p;
    cudaMalloc(&p, sizeof(float) * block_num * threads_per_block);

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);

    float scale = 1.0f;
    fmax4_lldr_coissue<<<block_num, threads_per_block>>>(p, scale, scale, scale, scale);
    int err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "failed to execute kernel" << std::endl;
    }

    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);

    float ellapse_ms = 0.0f;
    cudaEventElapsedTime(&ellapse_ms, start, stop);

    cudaFree(p);

    if (verbose) {
        std::cerr << ellapse_ms << " ms" << std::endl;
        std::cerr << static_cast<double>(ellapse_ms) * 1e-3 * mhz * 1e6 * 5 * 128 / total_fma << std::endl;
        std::cerr << 2.0 * (total_fma * 1.0e-9) / (ellapse_ms * 1e-3) << " gflops" << std::endl;
    }

    return 0;
}

#ifdef LOCAL_MAIN
int main(void)
{
    // measure_fmax2_lldr_coissue(true, 1176);
    measure_fmax4_lldr_coissue(true, 1176);
    return 0;
}
#endif
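All three benchmark functions above report throughput the same way: total_fma = block_num * threads_per_block * fma_per_thread, and GFLOPS = 2 * total_fma / elapsed_time, counting each fused multiply-add as two floating-point operations. The snippet below is a host-only sketch of that arithmetic; the elapsed time in it is a made-up example value, not a measurement.

// Illustrative sketch of the GFLOPS bookkeeping used by the benchmark above.
#include <cstdio>

int main() {
  const long long block_num = 8192;
  const long long threads_per_block = 1024;
  const long long fma_per_thread = 64LL * 64 * 4;          // fmax4 variant
  const long long total_fma = block_num * threads_per_block * fma_per_thread;

  const double elapsed_ms = 100.0;                         // hypothetical timing
  // Each FMA counts as 2 floating-point operations (multiply + add).
  const double gflops = 2.0 * (total_fma * 1.0e-9) / (elapsed_ms * 1.0e-3);
  std::printf("total FMA: %lld, GFLOPS at %.1f ms: %.1f\n",
              total_fma, elapsed_ms, gflops);
  return 0;
}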
5c858896a540ecb31d202fbeb0c244b4341d0331.hip
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>

#include <limits>

// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.

namespace at { namespace native {

void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
  AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(), "add_cuda/sub_cuda", [&]() {
    auto alpha = alpha_scalar.to<scalar_t>();
    gpu_kernel_with_scalars(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      return a + alpha * b;
    });
  });
}

static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
  add_kernel_cuda(iter, -alpha_scalar);
}

void div_kernel_cuda(TensorIterator& iter) {
  if (!isIntegralType(iter.dtype()) && iter.is_cpu_scalar(2)) {
    // optimization for floating-point types: if the second operand is a CPU
    // scalar, compute a * reciprocal(b). Note that this may lose one bit of
    // precision compared to computing the division.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "div_cuda", [&]() {
      auto inv_b = scalar_t(1.0 / iter.scalar_value<scalar_t>(2));
      iter.remove_operand(2);
      gpu_kernel(iter, [inv_b]GPU_LAMBDA(scalar_t a) -> scalar_t {
        return a * inv_b;
      });
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "div_cuda", [&]() {
      gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return a / b;
      });
    });
  }
}

void mul_kernel_cuda(TensorIterator& iter) {
  if (iter.dtype() == ScalarType::Bool) {
    // Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context]
    gpu_kernel_with_scalars(iter, []GPU_LAMBDA(bool a, bool b) -> bool {
      return a && b;
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "mul_cuda", [&]() {
      gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return a * b;
      });
    });
  }
}

REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
REGISTER_DISPATCH(div_stub, &div_kernel_cuda);
REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);

}} // namespace at::native
5c858896a540ecb31d202fbeb0c244b4341d0331.cu
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>

#include <limits>

// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.

namespace at { namespace native {

void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
  AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(), "add_cuda/sub_cuda", [&]() {
    auto alpha = alpha_scalar.to<scalar_t>();
    gpu_kernel_with_scalars(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      return a + alpha * b;
    });
  });
}

static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
  add_kernel_cuda(iter, -alpha_scalar);
}

void div_kernel_cuda(TensorIterator& iter) {
  if (!isIntegralType(iter.dtype()) && iter.is_cpu_scalar(2)) {
    // optimization for floating-point types: if the second operand is a CPU
    // scalar, compute a * reciprocal(b). Note that this may lose one bit of
    // precision compared to computing the division.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "div_cuda", [&]() {
      auto inv_b = scalar_t(1.0 / iter.scalar_value<scalar_t>(2));
      iter.remove_operand(2);
      gpu_kernel(iter, [inv_b]GPU_LAMBDA(scalar_t a) -> scalar_t {
        return a * inv_b;
      });
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "div_cuda", [&]() {
      gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return a / b;
      });
    });
  }
}

void mul_kernel_cuda(TensorIterator& iter) {
  if (iter.dtype() == ScalarType::Bool) {
    // Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context]
    gpu_kernel_with_scalars(iter, []GPU_LAMBDA(bool a, bool b) -> bool {
      return a && b;
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "mul_cuda", [&]() {
      gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return a * b;
      });
    });
  }
}

REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
REGISTER_DISPATCH(div_stub, &div_kernel_cuda);
REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);

}} // namespace at::native
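div_kernel_cuda above avoids a per-element division when the divisor is a single CPU-side scalar: it precomputes the reciprocal once and multiplies, accepting up to one bit of precision loss. The snippet below is a scalar host-only sketch of the same idea, written for illustration with no ATen types.

// Illustrative sketch of the reciprocal-multiply division optimization.
#include <cstdio>

void div_by_scalar(const float* a, float b, float* out, int n) {
  const float inv_b = 1.0f / b;       // may differ from a / b in the last bit
  for (int i = 0; i < n; ++i) {
    out[i] = a[i] * inv_b;
  }
}

int main() {
  const float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float out[4];
  div_by_scalar(a, 2.0f, out, 4);
  std::printf("%.1f %.1f %.1f %.1f\n", out[0], out[1], out[2], out[3]);
  return 0;
}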
afa028c38afab3ae8ff934307e37c381f7aefc95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/psroi_pool_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> __global__ void GPUPSROIPoolForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* output_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int input_channel = (c * pooled_height + ph) * pooled_width + pw; const T* offset_input_data = input_data + (roi_batch_id * input_channels + input_channel) * height * width; T outsum = 0; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; outsum += offset_input_data[input_index]; } } T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); output_data[i] = is_empty ? 0. : outsum / bin_area; } } template <typename T> __global__ void GPUPSROIPoolBackward( const int nthreads, const T* input_rois, const T* output_grad_data, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* input_grad_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; int input_channel = (c * pooled_height + ph) * pooled_width + pw; int input_offset = (roi_batch_id * input_channels + input_channel) * height * width; T* offset_input_grad_data = input_grad_data + input_offset; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Accumulate diff_val into input data T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); T diff_val = is_empty ? 0. : output_grad_data[i] / bin_area; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; platform::CudaAtomicAdd(offset_input_grad_data + input_index, diff_val); } } } } template <typename Place, typename T> class GPUPSROIPoolOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int input_channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; PADDLE_ENFORCE_EQ( input_channels, output_channels * pooled_height * pooled_width, platform::errors::InvalidArgument( "The channels %d of input X should equal the product of " "output_channels %d x pooled_height %d x pooled_width %d.", input_channels, output_channels, pooled_height, pooled_width)); int rois_num = rois->dims()[0]; if (rois_num == 0) return; int rois_batch_size; framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); auto* rois_num_data = rois_num_t->data<int>(); PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), ctx.GetPlace(), rois_num_data, sizeof(int) * rois_batch_size, 0); int rois_num_count = 0; for (int i = 0; i < rois_batch_size; ++i) { rois_num_count += rois_num_list[i]; } PADDLE_ENFORCE_EQ( rois_num_count, rois_num, platform::errors::InvalidArgument( "the rois_num from input and RoisNum must be the same")); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, 
platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, platform::errors::InvalidArgument( "The number of rois from input(ROIs) and its LOD " "must be the same. Received rois %d of input(ROIs) " "but the number of rois %d from its LOD is %d", rois_num, rois_num_with_lod)); // set rois batch id for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; // call cuda kernel function hipLaunchKernelGGL(( GPUPSROIPoolForward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_size, in->data<T>(), rois->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUPSROIPoolGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); int rois_num = rois->dims()[0]; int input_channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (input_grad) { // set roi batch id framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); int rois_batch_size; if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), ctx.GetPlace(), rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); input_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0)); int output_grad_size = output_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { hipLaunchKernelGGL(( GPUPSROIPoolBackward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_grad_size, rois->data<T>(), 
output_grad->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), input_grad->mutable_data<T>(ctx.GetPlace())); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( psroi_pool, ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( psroi_pool_grad, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
afa028c38afab3ae8ff934307e37c381f7aefc95.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/psroi_pool_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> __global__ void GPUPSROIPoolForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* output_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int input_channel = (c * pooled_height + ph) * pooled_width + pw; const T* offset_input_data = input_data + (roi_batch_id * input_channels + input_channel) * height * width; T outsum = 0; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; outsum += offset_input_data[input_index]; } } T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); output_data[i] = is_empty ? 0. 
: outsum / bin_area; } } template <typename T> __global__ void GPUPSROIPoolBackward( const int nthreads, const T* input_rois, const T* output_grad_data, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* input_grad_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; int input_channel = (c * pooled_height + ph) * pooled_width + pw; int input_offset = (roi_batch_id * input_channels + input_channel) * height * width; T* offset_input_grad_data = input_grad_data + input_offset; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Accumulate diff_val into input data T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); T diff_val = is_empty ? 0. 
: output_grad_data[i] / bin_area; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; platform::CudaAtomicAdd(offset_input_grad_data + input_index, diff_val); } } } } template <typename Place, typename T> class GPUPSROIPoolOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int input_channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; PADDLE_ENFORCE_EQ( input_channels, output_channels * pooled_height * pooled_width, platform::errors::InvalidArgument( "The channels %d of input X should equal the product of " "output_channels %d x pooled_height %d x pooled_width %d.", input_channels, output_channels, pooled_height, pooled_width)); int rois_num = rois->dims()[0]; if (rois_num == 0) return; int rois_batch_size; framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); auto* rois_num_data = rois_num_t->data<int>(); PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), ctx.GetPlace(), rois_num_data, sizeof(int) * rois_batch_size, 0); int rois_num_count = 0; for (int i = 0; i < rois_batch_size; ++i) { rois_num_count += rois_num_list[i]; } PADDLE_ENFORCE_EQ( rois_num_count, rois_num, platform::errors::InvalidArgument( "the rois_num from input and RoisNum must be the same")); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, platform::errors::InvalidArgument( "The number of rois from input(ROIs) and its LOD " "must be the same. 
Received rois %d of input(ROIs) " "but the number of rois %d from its LOD is %d", rois_num, rois_num_with_lod)); // set rois batch id for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; // call cuda kernel function GPUPSROIPoolForward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_size, in->data<T>(), rois->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUPSROIPoolGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); int rois_num = rois->dims()[0]; int input_channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (input_grad) { // set roi batch id framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); int rois_batch_size; if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), ctx.GetPlace(), rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); input_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0)); int output_grad_size = output_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { GPUPSROIPoolBackward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_grad_size, rois->data<T>(), output_grad->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), input_grad->mutable_data<T>(ctx.GetPlace())); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( psroi_pool, ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>); 
REGISTER_OP_CUDA_KERNEL( psroi_pool_grad, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
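Both GPUPSROIPoolForward and GPUPSROIPoolBackward above cap the launch at kNumMaximumNumBlocks (4096) blocks of kNumCUDAThreads (512) threads and then walk the flattened output with a grid-stride loop, so a single thread may process several (n, c, ph, pw) elements. The following is a minimal, self-contained sketch of that grid-stride pattern only; ScaleKernel and main are hypothetical stand-ins, not part of the Paddle sources above.

#include <cuda_runtime.h>
#include <algorithm>

static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;

// Same block-count heuristic as the Paddle kernels above: enough blocks to
// cover N elements, but never more than 4096.
static inline int NumBlocks(const int N) {
  return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
                  kNumMaximumNumBlocks);
}

// Hypothetical kernel: each thread strides over the flattened output, so a
// capped grid still covers arbitrarily large problem sizes.
__global__ void ScaleKernel(int n, const float* in, float scale, float* out) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride) {
    out[i] = scale * in[i];
  }
}

int main() {
  const int n = 1 << 20;
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  ScaleKernel<<<NumBlocks(n), kNumCUDAThreads>>>(n, d_in, 0.5f, d_out);
  cudaDeviceSynchronize();
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}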
d775eccc52a10635bad86d294495cbcda32c9c7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <torch/device/ReflectanceCostFunction.cuh> #include <optix_math.h> #include <lynx/Exception.h> namespace torch { // launch: 3 * number of vertices // params: 3 * number of vertices // residual: 3 * number of vertices // jacobian: non-zero value buffer (3 * number of map-indices) // size: 3 * number of vertices // map: column indices (row "owner" indicated by offsets) // offsets: number of vertices + 1 (points to addresses in map) // chromThr: minimum acceptable dot(ch1, ch2) that defines "similar" albedos // weight: scalar penalty apply to difference between albedos __global__ void EvaluateKernel(const float* params, float* residuals, float* jacobian, size_t size, const uint* map, const uint* offsets, float chromThreshold, float weight) { // vertex-channel index in CONSTRAINTS const unsigned int threadIndex = blockIdx.x * blockDim.x + threadIdx.x; // check if thread is valid if (threadIndex < size) { // vertex index in CONSTRAINTS const unsigned int vertexIndex = threadIndex / 3; // channel offset for vertex index above const unsigned int channelIndex = threadIndex % 3; // vertex-channel index of R channel in RGB albedo params const unsigned int vertexFirstChannelIndex = 3 * vertexIndex; // pointer to RGB albedo for vertex const float3 albedo = reinterpret_cast<const float3&>(params[vertexFirstChannelIndex]); // norm of albedo for checking blackness const float albedoNorm = length(albedo); // indicates if albedo is black const bool isBlack = albedoNorm < 0.1f; // chromaticity of vertex float3 chrom = (isBlack) ? make_float3(0, 0, 0) : albedo / albedoNorm; // first index in map associated with current vertex const unsigned int mapStart = offsets[vertexIndex]; // firrst index in map NOT associated with current vertex after mapStart const unsigned int mapStop = offsets[vertexIndex + 1]; // declare self map index for future assignment unsigned int selfMapIndex = 100; // initialize number of brighter adjacent vertices to zero unsigned int brighterAdjacentCount = 0; // compare vertex against all adjacent vertices for (unsigned int mapIndex = mapStart; mapIndex < mapStop; ++mapIndex) { // check if jacobian requested if (jacobian) { // get index of jacobian value const unsigned int jacobianIndex = 3 * mapIndex + channelIndex; // compute and store jacobian jacobian[jacobianIndex] = 0.0f; } // vertex index of adjacent vertex in PARAMETERS const unsigned int adjVertexIndex = map[mapIndex]; // check if adjacent vertex is self if (adjVertexIndex == vertexIndex) { // assign map index of self selfMapIndex = mapIndex; // skip remainder of loop continue; } // adjacent vertex-channel index of R channel in RGB albedo params const unsigned int adjVertexFirstChannelIndex = 3 * adjVertexIndex; // pointer to RGB albedo for adjacent vertex const float3 adjAlbedo = reinterpret_cast<const float3&>(params[adjVertexFirstChannelIndex]); // norm of adjacent vertex albedo for checking blackness const float adjAlbedoNorm = length(adjAlbedo); // indicates if adjacent albedo is black const bool adjIsBlack = adjAlbedoNorm < 0.1f; // check if adjacent vertex is not black if (!adjIsBlack) { // chromaticity of adjacent vertex // use identical chromaticity if adjacent vertex is black const float3 adjChrom = adjAlbedo / adjAlbedoNorm; // copy adjacent chromaticity if vertex is black chrom = (isBlack) ? 
adjChrom : albedo / albedoNorm; // get dot product of both chromaticities const float dotChrom = dot(chrom, adjChrom); // check if chromaticities are similiar enough if (dotChrom > chromThreshold) { // color channel value of vertex-channel const float channel = reinterpret_cast<const float*>(&albedo)[channelIndex]; // color channel value of adjacent vertex-channel const float adjChannel = reinterpret_cast<const float*>(&adjAlbedo)[channelIndex]; // check if vertex is darker than adjacent vertex if (channel < adjChannel) { // get index of residual for vertex-channel const unsigned int residualIndex = threadIndex; // compute and store residual residuals[residualIndex] += weight * (adjChannel - channel); // increment brighter adjacent vertex count ++brighterAdjacentCount; // check if jacobian requested if (jacobian) { // get index of jacobian value const unsigned int jacobianIndex = 3 * mapIndex + channelIndex; // compute and store jacobian jacobian[jacobianIndex] = weight; } } } } } // check if jacobian requested if (jacobian) { // get index of jacobian value const unsigned int jacobianIndex = 3 * selfMapIndex + channelIndex; // compute and store jacobian jacobian[jacobianIndex] = -1.0f * brighterAdjacentCount; } } } void Evaluate(const float* params, float* residual, float* jacobian, size_t size, const uint* map, const uint* offsets, float chromThreshold, float weight) { LYNX_ASSERT(size <= (1024 * 65535), "unsupported launch size"); const size_t blockDim = (size > 1024) ? 1024 : size; const size_t gridDim = (size + blockDim - 1) / blockDim; hipLaunchKernelGGL(( EvaluateKernel), dim3(gridDim), dim3(blockDim), 0, 0, params, residual, jacobian, size, map, offsets, chromThreshold, weight); } } // namespace torch
d775eccc52a10635bad86d294495cbcda32c9c7d.cu
#include <torch/device/ReflectanceCostFunction.cuh> #include <optix_math.h> #include <lynx/Exception.h> namespace torch { // launch: 3 * number of vertices // params: 3 * number of vertices // residual: 3 * number of vertices // jacobian: non-zero value buffer (3 * number of map-indices) // size: 3 * number of vertices // map: column indices (row "owner" indicated by offsets) // offsets: number of vertices + 1 (points to addresses in map) // chromThr: minimum acceptable dot(ch1, ch2) that defines "similar" albedos // weight: scalar penalty apply to difference between albedos __global__ void EvaluateKernel(const float* params, float* residuals, float* jacobian, size_t size, const uint* map, const uint* offsets, float chromThreshold, float weight) { // vertex-channel index in CONSTRAINTS const unsigned int threadIndex = blockIdx.x * blockDim.x + threadIdx.x; // check if thread is valid if (threadIndex < size) { // vertex index in CONSTRAINTS const unsigned int vertexIndex = threadIndex / 3; // channel offset for vertex index above const unsigned int channelIndex = threadIndex % 3; // vertex-channel index of R channel in RGB albedo params const unsigned int vertexFirstChannelIndex = 3 * vertexIndex; // pointer to RGB albedo for vertex const float3 albedo = reinterpret_cast<const float3&>(params[vertexFirstChannelIndex]); // norm of albedo for checking blackness const float albedoNorm = length(albedo); // indicates if albedo is black const bool isBlack = albedoNorm < 0.1f; // chromaticity of vertex float3 chrom = (isBlack) ? make_float3(0, 0, 0) : albedo / albedoNorm; // first index in map associated with current vertex const unsigned int mapStart = offsets[vertexIndex]; // firrst index in map NOT associated with current vertex after mapStart const unsigned int mapStop = offsets[vertexIndex + 1]; // declare self map index for future assignment unsigned int selfMapIndex = 100; // initialize number of brighter adjacent vertices to zero unsigned int brighterAdjacentCount = 0; // compare vertex against all adjacent vertices for (unsigned int mapIndex = mapStart; mapIndex < mapStop; ++mapIndex) { // check if jacobian requested if (jacobian) { // get index of jacobian value const unsigned int jacobianIndex = 3 * mapIndex + channelIndex; // compute and store jacobian jacobian[jacobianIndex] = 0.0f; } // vertex index of adjacent vertex in PARAMETERS const unsigned int adjVertexIndex = map[mapIndex]; // check if adjacent vertex is self if (adjVertexIndex == vertexIndex) { // assign map index of self selfMapIndex = mapIndex; // skip remainder of loop continue; } // adjacent vertex-channel index of R channel in RGB albedo params const unsigned int adjVertexFirstChannelIndex = 3 * adjVertexIndex; // pointer to RGB albedo for adjacent vertex const float3 adjAlbedo = reinterpret_cast<const float3&>(params[adjVertexFirstChannelIndex]); // norm of adjacent vertex albedo for checking blackness const float adjAlbedoNorm = length(adjAlbedo); // indicates if adjacent albedo is black const bool adjIsBlack = adjAlbedoNorm < 0.1f; // check if adjacent vertex is not black if (!adjIsBlack) { // chromaticity of adjacent vertex // use identical chromaticity if adjacent vertex is black const float3 adjChrom = adjAlbedo / adjAlbedoNorm; // copy adjacent chromaticity if vertex is black chrom = (isBlack) ? 
adjChrom : albedo / albedoNorm; // get dot product of both chromaticities const float dotChrom = dot(chrom, adjChrom); // check if chromaticities are similiar enough if (dotChrom > chromThreshold) { // color channel value of vertex-channel const float channel = reinterpret_cast<const float*>(&albedo)[channelIndex]; // color channel value of adjacent vertex-channel const float adjChannel = reinterpret_cast<const float*>(&adjAlbedo)[channelIndex]; // check if vertex is darker than adjacent vertex if (channel < adjChannel) { // get index of residual for vertex-channel const unsigned int residualIndex = threadIndex; // compute and store residual residuals[residualIndex] += weight * (adjChannel - channel); // increment brighter adjacent vertex count ++brighterAdjacentCount; // check if jacobian requested if (jacobian) { // get index of jacobian value const unsigned int jacobianIndex = 3 * mapIndex + channelIndex; // compute and store jacobian jacobian[jacobianIndex] = weight; } } } } } // check if jacobian requested if (jacobian) { // get index of jacobian value const unsigned int jacobianIndex = 3 * selfMapIndex + channelIndex; // compute and store jacobian jacobian[jacobianIndex] = -1.0f * brighterAdjacentCount; } } } void Evaluate(const float* params, float* residual, float* jacobian, size_t size, const uint* map, const uint* offsets, float chromThreshold, float weight) { LYNX_ASSERT(size <= (1024 * 65535), "unsupported launch size"); const size_t blockDim = (size > 1024) ? 1024 : size; const size_t gridDim = (size + blockDim - 1) / blockDim; EvaluateKernel<<<gridDim, blockDim>>>(params, residual, jacobian, size, map, offsets, chromThreshold, weight); } } // namespace torch
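The .hip and .cu versions of ReflectanceCostFunction differ only in the hipify boilerplate header (#include "hip/hip_runtime.h") and in how Evaluate launches the kernel: hipify rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL with explicit dim3, shared-memory, and stream arguments. Below is a small sketch of that rewrite under the same launch-shape logic as Evaluate(); FillKernel and LaunchFill are hypothetical stand-ins for EvaluateKernel and Evaluate.

#include <cuda_runtime.h>

// Hypothetical stand-in for EvaluateKernel: writes a constant into residuals.
__global__ void FillKernel(float* residuals, size_t size, float value) {
  const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) residuals[i] = value;
}

void LaunchFill(float* d_residuals, size_t size) {
  // Same launch-shape logic as Evaluate() above: 1D blocks capped at 1024.
  const size_t blockDim = (size > 1024) ? 1024 : size;
  const size_t gridDim = (size + blockDim - 1) / blockDim;

  // CUDA form, as in the .cu file:
  FillKernel<<<gridDim, blockDim>>>(d_residuals, size, 0.0f);

  // hipify emits the equivalent call below in the .hip file; the two extra
  // 0, 0 arguments are the dynamic shared-memory size and the stream:
  //   hipLaunchKernelGGL((FillKernel), dim3(gridDim), dim3(blockDim), 0, 0,
  //                      d_residuals, size, 0.0f);
}

int main() {
  const size_t n = 4096;
  float* d_residuals = nullptr;
  cudaMalloc(&d_residuals, n * sizeof(float));
  LaunchFill(d_residuals, n);
  cudaDeviceSynchronize();
  cudaFree(d_residuals);
  return 0;
}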
aa32f91201c58c4539063115d466eb18a20f563f.hip
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "im2col.h"
#include "THHTensor.hpp"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "generic/SpatialFullDilatedConvolution.cu"
#include "THHGenerateFloatTypes.h"
aa32f91201c58c4539063115d466eb18a20f563f.cu
#include "THCUNN.h" #include "im2col.h" #include "THCTensor.hpp" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include "generic/SpatialFullDilatedConvolution.cu" #include "THCGenerateFloatTypes.h"
ea1a0bfca25ab62bc59786b77206784185cc7f6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @author Ahmad Abdelfattah @precisions normal z -> s d c */ #include "magma_internal.h" #include "magma_templates.h" #include "shuffle.cuh" #include "sync.cuh" #include "atomics.cuh" #include "batched_kernel_param.h" #define PRECISION_z /** Purpose ------- LU factorization of m-by-n matrix ( m >= n ). Each thread block caches an entire column in register. Thread blocks communicate and synchronize through global memory. Assumptions: 1. dA is of size MxN such that N <= M. 2. Thread block must be 1D, with TX multiple of 32 (warp size) 3. TX must be >= n 4. n must be less than the number of SMs on the GPU **/ // ============================================================================= // init kernel __global__ void zgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags) { const int tx = threadIdx.x; if( tx < n){ ipiv[ tx ] = 0; } if( tx < max(n,npages) ){ update_flags[ tx ] = 0; } } // ============================================================================= // the main kernel template<int TX, int NPAGES> __global__ void zgetf2_native_kernel( int m, int n, magmaDoubleComplex_ptr dA, int ldda, volatile magma_int_t *ipiv, int gbstep, volatile int* update_flag, volatile magma_int_t *info) { const int tx = threadIdx.x; const int bx = blockIdx.x; magmaDoubleComplex rA[NPAGES] = {MAGMA_Z_ZERO}; magmaDoubleComplex rx, rx_max; magmaDoubleComplex_ptr da = dA; int rx_id, max_id, flag = 0; double rx_abs = 0.0, rx_abs_max = 0.0; const int m_ = m-(NPAGES-1)*TX; if( bx >= n ) return; __shared__ magmaDoubleComplex sx[ TX ]; __shared__ double sabs[ TX ]; __shared__ int smax_id[ TX ]; __shared__ magmaDoubleComplex sreg; // read dA += bx * ldda + tx; #pragma unroll for(int i = 0; i < NPAGES-1; i++){ rA[i] = dA[ i * TX ]; } if( tx < m_){ rA[NPAGES-1] = dA[ (NPAGES-1) * TX ]; } // main loop #pragma unroll for(int i = 0; i < n; i++){ // izamax and write pivot for the ith thread block if(bx == i){ rx_max = rx = (tx < i) ? 
MAGMA_Z_ZERO : rA[0]; rx_abs_max = rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx)); max_id = rx_id = tx; #pragma unroll for(int j = 1; j < NPAGES; j++){ rx = rA[j]; rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx)); if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = j * TX + tx; } } sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; __syncthreads(); // let the first warp do the final reduction step if(tx < 32){ #pragma unroll for(int j = 0; j < TX; j+= 32){ rx = sx[ j + tx ]; rx_abs = sabs[ j + tx ]; rx_id = smax_id[ j + tx ]; if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = rx_id; } } magmablas_syncwarp(); sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; magmablas_syncwarp(); #pragma unroll for(int j = 0; j < 32; j++){ rx = sx[j]; rx_abs = sabs[j]; rx_id = smax_id[j]; if ( rx_abs > rx_abs_max ){ rx_abs_max = rx_abs; rx_max = rx; max_id = rx_id; } } } if(tx == 0){ sx[ 0 ] = rx_max; sabs[ 0 ] = rx_abs_max; smax_id[ 0 ] = max_id; } __syncthreads(); rx_max = sx[ 0 ]; rx_abs_max = sabs[ 0 ]; max_id = smax_id[ 0 ]; __syncthreads(); // now every thread in the i^th block has the maximum if( tx == 0){ if( rx_abs_max == MAGMA_D_ZERO){ magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) ); } magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing } __syncthreads(); if( rx_abs_max == MAGMA_D_ZERO )return; } else{ // other thread blocks are waiting if(tx == 0){ max_id = 0; while( max_id == 0 ){ max_id = ipiv[i]; }; smax_id[ 0 ] = max_id; } __syncthreads(); max_id = smax_id[ 0 ]; max_id -= 1; // revert fortran indexing __syncthreads(); if( (*info) != 0 ) return; } // swap // swap always happens between page 0 and page x // to avoid spilling rA to local memory, we use shared memory if( max_id != i){ // all blocks swap in registers // for bx < i, the column is already written in memory, // but we have a copy in reg., so continue to swap in reg., // and do one final write to memory #pragma unroll for(int j = 0; j < NPAGES; j++){ if( j == (max_id/TX) ){ sx[ tx ] = rA[j]; __syncthreads(); if( tx == i ){ magmaDoubleComplex tmp = sx[ max_id%TX ]; sx[ max_id%TX ] = rA[0]; rA[0] = tmp; } __syncthreads(); if( tx == max_id%TX ){ rA[j] = sx[ tx ]; } __syncthreads(); } } //__syncthreads(); } // the ith block does scal if(bx == i){ magmaDoubleComplex reg = MAGMA_Z_DIV(MAGMA_Z_ONE, rx_max ); // scal if( tx > i ){ rA[0] *= reg; } #pragma unroll for(int j = 1; j < NPAGES; j++){ rA[j] *= reg; } // write column i to global memory #pragma unroll for(int j = 0; j < NPAGES-1; j++){ dA[ j * TX ] = rA[j]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } __threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1); } // thread blocks with ID larger than i perform ger if(bx > i){ if( tx == i ){ sreg = rA[0]; } // wait for scal if( tx == 0){ flag = 0; while( flag == 0 ){ flag = update_flag[ i ]; }; } __syncthreads(); magmaDoubleComplex reg = sreg; if( NPAGES == 1){ if(tx > i && tx < m_){ rA[0] -= da[ i * ldda + tx ] * reg; } }else{ if(tx > i){ rA[0] -= da[ i * ldda + tx ] * reg; } } #pragma unroll for(int j = 1; j < NPAGES-1; j++){ rA[j] -= da[ i * ldda + j * TX + tx ] * reg; } if( NPAGES > 1){ if( tx < m_ ){ rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg; } } } } // all blocks write their columns again except the last one if( bx < n-1 ){ #pragma unroll 
for(int i = 0; i < NPAGES-1; i++){ dA[ i * TX ] = rA[i]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } } } // ============================================================================= extern "C" magma_int_t magma_zgetf2_native_fused( magma_int_t m, magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_int_t gbstep, magma_int_t *flags, magma_int_t *info, magma_queue_t queue ) { magma_int_t arginfo = 0; const magma_int_t ntx = ZGETF2_FUSED_NTH; if( m < n || m > ZGETF2_FUSED_MAX_M ){ arginfo = -1; } else if( n > magma_getdevice_multiprocessor_count() ){ arginfo = -2; } else if( ldda < max(1, m) ){ arginfo = -4; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } magma_int_t arch = magma_getdevice_arch(); dim3 grid(n, 1, 1); dim3 threads(ntx, 1, 1); const magma_int_t npages = magma_ceildiv(m, ntx); // the kernel uses communication among thread blocks // as a safeguard, force one thread block per multiprocessor // by allocating more than half the shared memory magma_int_t shmem = magma_getdevice_shmem_block(); shmem = (shmem / 2); int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t hipLaunchKernelGGL(( zgetf2_native_init_kernel), dim3(1), dim3(max(n,npages)), 0, queue->cuda_stream() , n, npages, ipiv, update_flag); // The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx ) switch(npages){ case 1:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 2:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 3:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 4:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 5:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 6:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 7:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 8:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 9:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 10:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 11:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 12:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 13:hipLaunchKernelGGL(( zgetf2_native_kernel< 
ntx, 13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 14:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 15:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 16:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 17:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 18:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 19:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 20:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #if defined(PRECISION_s) || defined(PRECISION_d) case 21:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 22:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 23:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 24:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 25:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 26:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 27:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 28:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 29:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 30:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 31:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 32:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 33:hipLaunchKernelGGL(( 
zgetf2_native_kernel< ntx, 33>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 34:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 34>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 35:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 35>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 36:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 36>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 37:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 37>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 38:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 38>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 39:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 39>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 40:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 40>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 41:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 41>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 42:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 42>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 43:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 43>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 44:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 44>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 45:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 45>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 46:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 46>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 47:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 47>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 48:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 48>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 49:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 49>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 50:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 50>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) || defined(PRECISION_d) #if defined(PRECISION_s) case 51:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 51>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 52:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 52>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, 
update_flag, info); break; case 53:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 53>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 54:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 54>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 55:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 55>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 56:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 56>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 57:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 57>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 58:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 58>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 59:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 59>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 60:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 60>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 61:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 61>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 62:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 62>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 63:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 63>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 64:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 64>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 65:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 65>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 66:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 66>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 67:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 67>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 68:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 68>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 69:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 69>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 70:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 70>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 71:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 71>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 72:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 72>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 
73:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 73>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 74:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 74>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 75:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 75>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 76:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 76>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 77:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 77>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 78:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 78>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 79:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 79>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 80:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 80>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) default: printf("size not supported \n"); } return 0; }
ea1a0bfca25ab62bc59786b77206784185cc7f6c.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Azzam Haidar @author Ahmad Abdelfattah @precisions normal z -> s d c */ #include "magma_internal.h" #include "magma_templates.h" #include "shuffle.cuh" #include "sync.cuh" #include "atomics.cuh" #include "batched_kernel_param.h" #define PRECISION_z /** Purpose ------- LU factorization of m-by-n matrix ( m >= n ). Each thread block caches an entire column in register. Thread blocks communicate and synchronize through global memory. Assumptions: 1. dA is of size MxN such that N <= M. 2. Thread block must be 1D, with TX multiple of 32 (warp size) 3. TX must be >= n 4. n must be less than the number of SMs on the GPU **/ // ============================================================================= // init kernel __global__ void zgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags) { const int tx = threadIdx.x; if( tx < n){ ipiv[ tx ] = 0; } if( tx < max(n,npages) ){ update_flags[ tx ] = 0; } } // ============================================================================= // the main kernel template<int TX, int NPAGES> __global__ void zgetf2_native_kernel( int m, int n, magmaDoubleComplex_ptr dA, int ldda, volatile magma_int_t *ipiv, int gbstep, volatile int* update_flag, volatile magma_int_t *info) { const int tx = threadIdx.x; const int bx = blockIdx.x; magmaDoubleComplex rA[NPAGES] = {MAGMA_Z_ZERO}; magmaDoubleComplex rx, rx_max; magmaDoubleComplex_ptr da = dA; int rx_id, max_id, flag = 0; double rx_abs = 0.0, rx_abs_max = 0.0; const int m_ = m-(NPAGES-1)*TX; if( bx >= n ) return; __shared__ magmaDoubleComplex sx[ TX ]; __shared__ double sabs[ TX ]; __shared__ int smax_id[ TX ]; __shared__ magmaDoubleComplex sreg; // read dA += bx * ldda + tx; #pragma unroll for(int i = 0; i < NPAGES-1; i++){ rA[i] = dA[ i * TX ]; } if( tx < m_){ rA[NPAGES-1] = dA[ (NPAGES-1) * TX ]; } // main loop #pragma unroll for(int i = 0; i < n; i++){ // izamax and write pivot for the ith thread block if(bx == i){ rx_max = rx = (tx < i) ? 
MAGMA_Z_ZERO : rA[0]; rx_abs_max = rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx)); max_id = rx_id = tx; #pragma unroll for(int j = 1; j < NPAGES; j++){ rx = rA[j]; rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx)); if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = j * TX + tx; } } sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; __syncthreads(); // let the first warp do the final reduction step if(tx < 32){ #pragma unroll for(int j = 0; j < TX; j+= 32){ rx = sx[ j + tx ]; rx_abs = sabs[ j + tx ]; rx_id = smax_id[ j + tx ]; if ( rx_abs > rx_abs_max ){ rx_max = rx; rx_abs_max = rx_abs; max_id = rx_id; } } magmablas_syncwarp(); sx[ tx ] = rx_max; sabs[ tx ] = rx_abs_max; smax_id[ tx ] = max_id; magmablas_syncwarp(); #pragma unroll for(int j = 0; j < 32; j++){ rx = sx[j]; rx_abs = sabs[j]; rx_id = smax_id[j]; if ( rx_abs > rx_abs_max ){ rx_abs_max = rx_abs; rx_max = rx; max_id = rx_id; } } } if(tx == 0){ sx[ 0 ] = rx_max; sabs[ 0 ] = rx_abs_max; smax_id[ 0 ] = max_id; } __syncthreads(); rx_max = sx[ 0 ]; rx_abs_max = sabs[ 0 ]; max_id = smax_id[ 0 ]; __syncthreads(); // now every thread in the i^th block has the maximum if( tx == 0){ if( rx_abs_max == MAGMA_D_ZERO){ magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) ); } magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing } __syncthreads(); if( rx_abs_max == MAGMA_D_ZERO )return; } else{ // other thread blocks are waiting if(tx == 0){ max_id = 0; while( max_id == 0 ){ max_id = ipiv[i]; }; smax_id[ 0 ] = max_id; } __syncthreads(); max_id = smax_id[ 0 ]; max_id -= 1; // revert fortran indexing __syncthreads(); if( (*info) != 0 ) return; } // swap // swap always happens between page 0 and page x // to avoid spilling rA to local memory, we use shared memory if( max_id != i){ // all blocks swap in registers // for bx < i, the column is already written in memory, // but we have a copy in reg., so continue to swap in reg., // and do one final write to memory #pragma unroll for(int j = 0; j < NPAGES; j++){ if( j == (max_id/TX) ){ sx[ tx ] = rA[j]; __syncthreads(); if( tx == i ){ magmaDoubleComplex tmp = sx[ max_id%TX ]; sx[ max_id%TX ] = rA[0]; rA[0] = tmp; } __syncthreads(); if( tx == max_id%TX ){ rA[j] = sx[ tx ]; } __syncthreads(); } } //__syncthreads(); } // the ith block does scal if(bx == i){ magmaDoubleComplex reg = MAGMA_Z_DIV(MAGMA_Z_ONE, rx_max ); // scal if( tx > i ){ rA[0] *= reg; } #pragma unroll for(int j = 1; j < NPAGES; j++){ rA[j] *= reg; } // write column i to global memory #pragma unroll for(int j = 0; j < NPAGES-1; j++){ dA[ j * TX ] = rA[j]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } __threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1); } // thread blocks with ID larger than i perform ger if(bx > i){ if( tx == i ){ sreg = rA[0]; } // wait for scal if( tx == 0){ flag = 0; while( flag == 0 ){ flag = update_flag[ i ]; }; } __syncthreads(); magmaDoubleComplex reg = sreg; if( NPAGES == 1){ if(tx > i && tx < m_){ rA[0] -= da[ i * ldda + tx ] * reg; } }else{ if(tx > i){ rA[0] -= da[ i * ldda + tx ] * reg; } } #pragma unroll for(int j = 1; j < NPAGES-1; j++){ rA[j] -= da[ i * ldda + j * TX + tx ] * reg; } if( NPAGES > 1){ if( tx < m_ ){ rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg; } } } } // all blocks write their columns again except the last one if( bx < n-1 ){ #pragma unroll 
for(int i = 0; i < NPAGES-1; i++){ dA[ i * TX ] = rA[i]; } if( tx < m_){ dA[ (NPAGES-1) * TX ] = rA[NPAGES-1]; } } } // ============================================================================= extern "C" magma_int_t magma_zgetf2_native_fused( magma_int_t m, magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_int_t gbstep, magma_int_t *flags, magma_int_t *info, magma_queue_t queue ) { magma_int_t arginfo = 0; const magma_int_t ntx = ZGETF2_FUSED_NTH; if( m < n || m > ZGETF2_FUSED_MAX_M ){ arginfo = -1; } else if( n > magma_getdevice_multiprocessor_count() ){ arginfo = -2; } else if( ldda < max(1, m) ){ arginfo = -4; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } magma_int_t arch = magma_getdevice_arch(); dim3 grid(n, 1, 1); dim3 threads(ntx, 1, 1); const magma_int_t npages = magma_ceildiv(m, ntx); // the kernel uses communication among thread blocks // as a safeguard, force one thread block per multiprocessor // by allocating more than half the shared memory magma_int_t shmem = magma_getdevice_shmem_block(); shmem = (shmem / 2); int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t zgetf2_native_init_kernel<<< 1, max(n,npages), 0, queue->cuda_stream() >>>( n, npages, ipiv, update_flag); // The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx ) switch(npages){ case 1: zgetf2_native_kernel< ntx, 1><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 2: zgetf2_native_kernel< ntx, 2><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 3: zgetf2_native_kernel< ntx, 3><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 4: zgetf2_native_kernel< ntx, 4><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 5: zgetf2_native_kernel< ntx, 5><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 6: zgetf2_native_kernel< ntx, 6><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 7: zgetf2_native_kernel< ntx, 7><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 8: zgetf2_native_kernel< ntx, 8><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 9: zgetf2_native_kernel< ntx, 9><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 10: zgetf2_native_kernel< ntx, 10><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 11: zgetf2_native_kernel< ntx, 11><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 12: zgetf2_native_kernel< ntx, 12><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 13: zgetf2_native_kernel< ntx, 13><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 14: zgetf2_native_kernel< ntx, 14><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 15: zgetf2_native_kernel< ntx, 15><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, 
info); break; case 16: zgetf2_native_kernel< ntx, 16><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 17: zgetf2_native_kernel< ntx, 17><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 18: zgetf2_native_kernel< ntx, 18><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 19: zgetf2_native_kernel< ntx, 19><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 20: zgetf2_native_kernel< ntx, 20><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #if defined(PRECISION_s) || defined(PRECISION_d) case 21: zgetf2_native_kernel< ntx, 21><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 22: zgetf2_native_kernel< ntx, 22><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 23: zgetf2_native_kernel< ntx, 23><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 24: zgetf2_native_kernel< ntx, 24><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 25: zgetf2_native_kernel< ntx, 25><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 26: zgetf2_native_kernel< ntx, 26><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 27: zgetf2_native_kernel< ntx, 27><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 28: zgetf2_native_kernel< ntx, 28><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 29: zgetf2_native_kernel< ntx, 29><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 30: zgetf2_native_kernel< ntx, 30><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 31: zgetf2_native_kernel< ntx, 31><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 32: zgetf2_native_kernel< ntx, 32><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 33: zgetf2_native_kernel< ntx, 33><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 34: zgetf2_native_kernel< ntx, 34><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 35: zgetf2_native_kernel< ntx, 35><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 36: zgetf2_native_kernel< ntx, 36><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 37: zgetf2_native_kernel< ntx, 37><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 38: zgetf2_native_kernel< ntx, 38><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 39: zgetf2_native_kernel< ntx, 39><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, 
ipiv, gbstep, update_flag, info); break; case 40: zgetf2_native_kernel< ntx, 40><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 41: zgetf2_native_kernel< ntx, 41><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 42: zgetf2_native_kernel< ntx, 42><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 43: zgetf2_native_kernel< ntx, 43><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 44: zgetf2_native_kernel< ntx, 44><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 45: zgetf2_native_kernel< ntx, 45><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 46: zgetf2_native_kernel< ntx, 46><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 47: zgetf2_native_kernel< ntx, 47><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 48: zgetf2_native_kernel< ntx, 48><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 49: zgetf2_native_kernel< ntx, 49><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 50: zgetf2_native_kernel< ntx, 50><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) || defined(PRECISION_d) #if defined(PRECISION_s) case 51: zgetf2_native_kernel< ntx, 51><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 52: zgetf2_native_kernel< ntx, 52><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 53: zgetf2_native_kernel< ntx, 53><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 54: zgetf2_native_kernel< ntx, 54><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 55: zgetf2_native_kernel< ntx, 55><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 56: zgetf2_native_kernel< ntx, 56><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 57: zgetf2_native_kernel< ntx, 57><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 58: zgetf2_native_kernel< ntx, 58><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 59: zgetf2_native_kernel< ntx, 59><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 60: zgetf2_native_kernel< ntx, 60><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 61: zgetf2_native_kernel< ntx, 61><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 62: zgetf2_native_kernel< ntx, 62><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 63: zgetf2_native_kernel< ntx, 63><<<grid, 
threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 64: zgetf2_native_kernel< ntx, 64><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 65: zgetf2_native_kernel< ntx, 65><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 66: zgetf2_native_kernel< ntx, 66><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 67: zgetf2_native_kernel< ntx, 67><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 68: zgetf2_native_kernel< ntx, 68><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 69: zgetf2_native_kernel< ntx, 69><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 70: zgetf2_native_kernel< ntx, 70><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 71: zgetf2_native_kernel< ntx, 71><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 72: zgetf2_native_kernel< ntx, 72><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 73: zgetf2_native_kernel< ntx, 73><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 74: zgetf2_native_kernel< ntx, 74><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 75: zgetf2_native_kernel< ntx, 75><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 76: zgetf2_native_kernel< ntx, 76><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 77: zgetf2_native_kernel< ntx, 77><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 78: zgetf2_native_kernel< ntx, 78><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 79: zgetf2_native_kernel< ntx, 79><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; case 80: zgetf2_native_kernel< ntx, 80><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break; #endif // defined(PRECISION_s) default: printf("size not supported \n"); } return 0; }
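The long switch in magma_zgetf2_native_fused exists because NPAGES (the number of TX-sized row pages each thread caches in registers) must be a compile-time template argument: the host computes npages = magma_ceildiv(m, ntx), instantiates one kernel per value (with the upper case bound widened per precision by the #if blocks), and requests more than half the device shared memory so only one block runs per multiprocessor. A much smaller sketch of that dispatch pattern follows; column_cache_kernel, kTX, launch, and main are hypothetical stand-ins for zgetf2_native_kernel and its wrapper, not MAGMA code.

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical miniature of the dispatch: NPAGES must be a compile-time
// constant so the per-thread cache rA[NPAGES] can live in registers, hence
// the switch over npages that instantiates one kernel per value.
template <int TX, int NPAGES>
__global__ void column_cache_kernel(int m, const double* dA, double* out) {
  double rA[NPAGES];                       // column slice cached in registers
  const int tx = threadIdx.x;
  const int m_ = m - (NPAGES - 1) * TX;    // rows in the last, partial page
  for (int j = 0; j < NPAGES - 1; j++) rA[j] = dA[j * TX + tx];
  if (tx < m_) rA[NPAGES - 1] = dA[(NPAGES - 1) * TX + tx];
  // Trivial use of the cached values, just to demonstrate the write-back.
  for (int j = 0; j < NPAGES - 1; j++) out[j * TX + tx] = 2.0 * rA[j];
  if (tx < m_) out[(NPAGES - 1) * TX + tx] = 2.0 * rA[NPAGES - 1];
}

constexpr int kTX = 512;                   // stands in for ZGETF2_FUSED_NTH

void launch(int m, const double* dA, double* out, cudaStream_t stream) {
  const int npages = (m + kTX - 1) / kTX;  // magma_ceildiv(m, kTX)
  switch (npages) {
    case 1: column_cache_kernel<kTX, 1><<<1, kTX, 0, stream>>>(m, dA, out); break;
    case 2: column_cache_kernel<kTX, 2><<<1, kTX, 0, stream>>>(m, dA, out); break;
    case 3: column_cache_kernel<kTX, 3><<<1, kTX, 0, stream>>>(m, dA, out); break;
    default: printf("size not supported \n");
  }
}

int main() {
  const int m = 1000;                      // 1000 rows -> npages = 2 for kTX = 512
  double *dA = nullptr, *dOut = nullptr;
  cudaMalloc(&dA, m * sizeof(double));
  cudaMalloc(&dOut, m * sizeof(double));
  launch(m, dA, dOut, 0);
  cudaDeviceSynchronize();
  cudaFree(dA);
  cudaFree(dOut);
  return 0;
}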
8bc630af316f690abec6d75856ad24f47596663d.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/hip/HIPMathCompat.h> #include <c10/core/Scalar.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp(a); }); }); } void expm1_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::BFloat16, ScalarType::Half, iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( ScalarType::BFloat16, ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. 
return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void clamp_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIteratorBase& iter, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void nan_to_num_kernel_cuda( TensorIteratorBase& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void frexp_kernel_cuda(TensorIteratorBase& iter) { #if defined(USE_ROCM) // Reference: https://rocmdocs.amd.com/en/latest/ROCm_API_References/HIP-MATH.html // https://github.com/ROCm-Developer-Tools/HIP/issues/2169 // ROCm does not support frexp function yet TORCH_CHECK(false, "torch.frexp() is not implemented on ROCm platform."); #else AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, // The iter.dtype() here is the dtype of mantissa output. // It's a floating point type and must be the same as the input's dtype. 
iter.dtype(), "frexp_cuda", [&]() { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> { int32_t exponent; scalar_t mantissa = ::frexp(a, &exponent); return {mantissa, exponent}; }); }); #endif } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda); } // namespace native } // namespace at
8bc630af316f690abec6d75856ad24f47596663d.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/NumericUtils.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/core/Scalar.h> #include <c10/util/complex.h> namespace at { namespace native { void bitwise_not_kernel_cuda(TensorIteratorBase& iter) { if (iter.dtype() == ScalarType::Bool) { gpu_kernel(iter, []GPU_LAMBDA(bool a) { return !a; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ~a; }); }); } } void exp_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "exp_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return std::exp(a); }); }); } void expm1_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::BFloat16, ScalarType::Half, iter.common_dtype(), "expm1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::expm1(a); }); }); } // We manually overload rsqrt because std::rsqrt does not work with complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) { return ::rsqrt(v); } template<typename T> __host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) { const c10::complex<T> one = c10::complex<T>(1.0, 0); // std::sqrt for c10::complex is overloaded in c10/util/complex_math.h return one / ::sqrt(v); } void rsqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( ScalarType::BFloat16, ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { // In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float. 
return rsqrt_wrapper(a); }); }); } void sqrt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sqrt(a); }); }); } void clamp_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() { auto lower = min_value.to<scalar_t>(); auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() { auto lower = min_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_cuda(TensorIteratorBase& iter, const Scalar& max_value) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() { auto upper = max_value.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void nan_to_num_kernel_cuda( TensorIteratorBase& iter, c10::optional<double> nan, c10::optional<double> pos_inf, c10::optional<double> neg_inf) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "nan_to_num_cuda", [&]() { scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.)); scalar_t pos_inf_replacement = pos_inf.has_value() ? static_cast<scalar_t>(pos_inf.value()) : std::numeric_limits<scalar_t>::max(); scalar_t neg_inf_replacement = neg_inf.has_value() ? static_cast<scalar_t>(neg_inf.value()) : std::numeric_limits<scalar_t>::lowest(); gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { return ( at::_isnan(a) ? nan_replacement : (a == std::numeric_limits<scalar_t>::infinity() ? pos_inf_replacement : (a == -std::numeric_limits<scalar_t>::infinity() ? neg_inf_replacement : a))); }); }); } void frexp_kernel_cuda(TensorIteratorBase& iter) { #if defined(USE_ROCM) // Reference: https://rocmdocs.amd.com/en/latest/ROCm_API_References/HIP-MATH.html // https://github.com/ROCm-Developer-Tools/HIP/issues/2169 // ROCm does not support frexp function yet TORCH_CHECK(false, "torch.frexp() is not implemented on ROCm platform."); #else AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, // The iter.dtype() here is the dtype of mantissa output. // It's a floating point type and must be the same as the input's dtype. 
iter.dtype(), "frexp_cuda", [&]() { gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> { int32_t exponent; scalar_t mantissa = std::frexp(a, &exponent); return {mantissa, exponent}; }); }); #endif } REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda); REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda); REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda); REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda); REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda); REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda); REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda); } // namespace native } // namespace at
2713ef3a4e4e4804edc683119c136b8decd1ab8e.hip
// !!! This is a file automatically generated by hipify!!! #include "InterpDensSpecies.h" void interp_dens_species_deallocate_gpu(struct interpDensSpecies_a* ids_gpu) { hipFree(&(ids_gpu->rhon_flat)); hipFree(&(ids_gpu->rhoc_flat)); hipFree(&(ids_gpu->Jx_flat)); hipFree(&(ids_gpu->Jy_flat)); hipFree(&(ids_gpu->Jz_flat)); hipFree(&(ids_gpu->pxx_flat)); hipFree(&(ids_gpu->pxy_flat)); hipFree(&(ids_gpu->pxz_flat)); hipFree(&(ids_gpu->pyy_flat)); hipFree(&(ids_gpu->pyz_flat)); hipFree(&(ids_gpu->pzz_flat)); } void interp_dens_species_allocate_gpu(struct interpDensSpecies_a* ids_gpu, struct grid* grd) { FPinterp* d_ids[11]; for (int i = 0; i < 10; ++i) hipMalloc(&d_ids[i], grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp)); ids_gpu->rhon_flat = d_ids[0]; ids_gpu->Jx_flat = d_ids[1]; ids_gpu->Jy_flat = d_ids[2]; ids_gpu->Jz_flat = d_ids[3]; ids_gpu->pxx_flat = d_ids[4]; ids_gpu->pxy_flat = d_ids[5]; ids_gpu->pxz_flat = d_ids[6]; ids_gpu->pyy_flat = d_ids[7]; ids_gpu->pyz_flat = d_ids[8]; ids_gpu->pzz_flat = d_ids[9]; hipMalloc(&d_ids[10], grd->nxc*grd->nyc*grd->nzc*sizeof(FPinterp)); ids_gpu->rhoc_flat = d_ids[10]; } void ids_copy(struct interpDensSpecies* ids, struct interpDensSpecies_a* ids_gpu, struct grid* grd, copy_way c) { if (c == CPU_TO_GPU) { hipMemcpy(ids_gpu->rhon_flat, ids->rhon_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->rhoc_flat, ids->rhoc_flat, grd->nxc*grd->nyc*grd->nzc*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->Jx_flat, ids->Jx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->Jy_flat, ids->Jy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->Jz_flat, ids->Jz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->pxx_flat, ids->pxx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->pxy_flat, ids->pxy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->pxz_flat, ids->pxz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->pyy_flat, ids->pyy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->pyz_flat, ids->pyz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); hipMemcpy(ids_gpu->pzz_flat, ids->pzz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyHostToDevice); } else { hipMemcpy(ids->rhon_flat, ids_gpu->rhon_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->rhoc_flat, ids_gpu->rhoc_flat, grd->nxc*grd->nyc*grd->nzc*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->Jx_flat, ids_gpu->Jx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->Jy_flat, ids_gpu->Jy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->Jz_flat, ids_gpu->Jz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->pxx_flat, ids_gpu->pxx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->pxy_flat, ids_gpu->pxy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->pxz_flat, ids_gpu->pxz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->pyy_flat, ids_gpu->pyy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->pyz_flat, 
ids_gpu->pyz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); hipMemcpy(ids->pzz_flat, ids_gpu->pzz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), hipMemcpyDeviceToHost); } } /** allocated interpolated densities per species */ void interp_dens_species_allocate(struct grid* grd, struct interpDensSpecies* ids, int is) { // set species ID ids->species_ID = is; // allocate 3D arrays // rho: 1 ids->rhon = newArr3<FPinterp>(&ids->rhon_flat, grd->nxn, grd->nyn, grd->nzn); // nodes ids->rhoc = newArr3<FPinterp>(&ids->rhoc_flat, grd->nxc, grd->nyc, grd->nzc); // center // Jx: 2 ids->Jx = newArr3<FPinterp>(&ids->Jx_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Jy: 3 ids->Jy = newArr3<FPinterp>(&ids->Jy_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Jz: 4 ids->Jz = newArr3<FPinterp>(&ids->Jz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pxx: 5 ids->pxx = newArr3<FPinterp>(&ids->pxx_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pxy: 6 ids->pxy = newArr3<FPinterp>(&ids->pxy_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pxz: 7 ids->pxz = newArr3<FPinterp>(&ids->pxz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pyy: 8 ids->pyy = newArr3<FPinterp>(&ids->pyy_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pyz: 9 ids->pyz = newArr3<FPinterp>(&ids->pyz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pzz: 10 ids->pzz = newArr3<FPinterp>(&ids->pzz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes } /** deallocate interpolated densities per species */ void interp_dens_species_deallocate(struct grid* grd, struct interpDensSpecies* ids) { // deallocate 3D arrays delArr3(ids->rhon, grd->nxn, grd->nyn); delArr3(ids->rhoc, grd->nxc, grd->nyc); // deallocate 3D arrays: J - current delArr3(ids->Jx, grd->nxn, grd->nyn); delArr3(ids->Jy, grd->nxn, grd->nyn); delArr3(ids->Jz, grd->nxn, grd->nyn); // deallocate 3D arrays: pressure delArr3(ids->pxx, grd->nxn, grd->nyn); delArr3(ids->pxy, grd->nxn, grd->nyn); delArr3(ids->pxz, grd->nxn, grd->nyn); delArr3(ids->pyy, grd->nxn, grd->nyn); delArr3(ids->pyz, grd->nxn, grd->nyn); delArr3(ids->pzz, grd->nxn, grd->nyn); } /** deallocate interpolated densities per species */ void interpN2Crho(struct interpDensSpecies* ids, struct grid* grd){ for (register int i = 1; i < grd->nxc - 1; i++) for (register int j = 1; j < grd->nyc - 1; j++) for (register int k = 1; k < grd->nzc - 1; k++){ ids->rhoc[i][j][k] = .125 * (ids->rhon[i][j][k] + ids->rhon[i + 1][j][k] + ids->rhon[i][j + 1][k] + ids->rhon[i][j][k + 1] + ids->rhon[i + 1][j + 1][k]+ ids->rhon[i + 1][j][k + 1] + ids->rhon[i][j + 1][k + 1] + ids->rhon[i + 1][j + 1][k + 1]); } }
2713ef3a4e4e4804edc683119c136b8decd1ab8e.cu
#include "InterpDensSpecies.h" void interp_dens_species_deallocate_gpu(struct interpDensSpecies_a* ids_gpu) { cudaFree(&(ids_gpu->rhon_flat)); cudaFree(&(ids_gpu->rhoc_flat)); cudaFree(&(ids_gpu->Jx_flat)); cudaFree(&(ids_gpu->Jy_flat)); cudaFree(&(ids_gpu->Jz_flat)); cudaFree(&(ids_gpu->pxx_flat)); cudaFree(&(ids_gpu->pxy_flat)); cudaFree(&(ids_gpu->pxz_flat)); cudaFree(&(ids_gpu->pyy_flat)); cudaFree(&(ids_gpu->pyz_flat)); cudaFree(&(ids_gpu->pzz_flat)); } void interp_dens_species_allocate_gpu(struct interpDensSpecies_a* ids_gpu, struct grid* grd) { FPinterp* d_ids[11]; for (int i = 0; i < 10; ++i) cudaMalloc(&d_ids[i], grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp)); ids_gpu->rhon_flat = d_ids[0]; ids_gpu->Jx_flat = d_ids[1]; ids_gpu->Jy_flat = d_ids[2]; ids_gpu->Jz_flat = d_ids[3]; ids_gpu->pxx_flat = d_ids[4]; ids_gpu->pxy_flat = d_ids[5]; ids_gpu->pxz_flat = d_ids[6]; ids_gpu->pyy_flat = d_ids[7]; ids_gpu->pyz_flat = d_ids[8]; ids_gpu->pzz_flat = d_ids[9]; cudaMalloc(&d_ids[10], grd->nxc*grd->nyc*grd->nzc*sizeof(FPinterp)); ids_gpu->rhoc_flat = d_ids[10]; } void ids_copy(struct interpDensSpecies* ids, struct interpDensSpecies_a* ids_gpu, struct grid* grd, copy_way c) { if (c == CPU_TO_GPU) { cudaMemcpy(ids_gpu->rhon_flat, ids->rhon_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->rhoc_flat, ids->rhoc_flat, grd->nxc*grd->nyc*grd->nzc*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->Jx_flat, ids->Jx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->Jy_flat, ids->Jy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->Jz_flat, ids->Jz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->pxx_flat, ids->pxx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->pxy_flat, ids->pxy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->pxz_flat, ids->pxz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->pyy_flat, ids->pyy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->pyz_flat, ids->pyz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); cudaMemcpy(ids_gpu->pzz_flat, ids->pzz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyHostToDevice); } else { cudaMemcpy(ids->rhon_flat, ids_gpu->rhon_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->rhoc_flat, ids_gpu->rhoc_flat, grd->nxc*grd->nyc*grd->nzc*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->Jx_flat, ids_gpu->Jx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->Jy_flat, ids_gpu->Jy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->Jz_flat, ids_gpu->Jz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->pxx_flat, ids_gpu->pxx_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->pxy_flat, ids_gpu->pxy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->pxz_flat, ids_gpu->pxz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->pyy_flat, ids_gpu->pyy_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->pyz_flat, 
ids_gpu->pyz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); cudaMemcpy(ids->pzz_flat, ids_gpu->pzz_flat, grd->nxn*grd->nyn*grd->nzn*sizeof(FPinterp), cudaMemcpyDeviceToHost); } } /** allocated interpolated densities per species */ void interp_dens_species_allocate(struct grid* grd, struct interpDensSpecies* ids, int is) { // set species ID ids->species_ID = is; // allocate 3D arrays // rho: 1 ids->rhon = newArr3<FPinterp>(&ids->rhon_flat, grd->nxn, grd->nyn, grd->nzn); // nodes ids->rhoc = newArr3<FPinterp>(&ids->rhoc_flat, grd->nxc, grd->nyc, grd->nzc); // center // Jx: 2 ids->Jx = newArr3<FPinterp>(&ids->Jx_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Jy: 3 ids->Jy = newArr3<FPinterp>(&ids->Jy_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Jz: 4 ids->Jz = newArr3<FPinterp>(&ids->Jz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pxx: 5 ids->pxx = newArr3<FPinterp>(&ids->pxx_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pxy: 6 ids->pxy = newArr3<FPinterp>(&ids->pxy_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pxz: 7 ids->pxz = newArr3<FPinterp>(&ids->pxz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pyy: 8 ids->pyy = newArr3<FPinterp>(&ids->pyy_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pyz: 9 ids->pyz = newArr3<FPinterp>(&ids->pyz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes // Pzz: 10 ids->pzz = newArr3<FPinterp>(&ids->pzz_flat, grd->nxn, grd->nyn, grd->nzn); // nodes } /** deallocate interpolated densities per species */ void interp_dens_species_deallocate(struct grid* grd, struct interpDensSpecies* ids) { // deallocate 3D arrays delArr3(ids->rhon, grd->nxn, grd->nyn); delArr3(ids->rhoc, grd->nxc, grd->nyc); // deallocate 3D arrays: J - current delArr3(ids->Jx, grd->nxn, grd->nyn); delArr3(ids->Jy, grd->nxn, grd->nyn); delArr3(ids->Jz, grd->nxn, grd->nyn); // deallocate 3D arrays: pressure delArr3(ids->pxx, grd->nxn, grd->nyn); delArr3(ids->pxy, grd->nxn, grd->nyn); delArr3(ids->pxz, grd->nxn, grd->nyn); delArr3(ids->pyy, grd->nxn, grd->nyn); delArr3(ids->pyz, grd->nxn, grd->nyn); delArr3(ids->pzz, grd->nxn, grd->nyn); } /** deallocate interpolated densities per species */ void interpN2Crho(struct interpDensSpecies* ids, struct grid* grd){ for (register int i = 1; i < grd->nxc - 1; i++) for (register int j = 1; j < grd->nyc - 1; j++) for (register int k = 1; k < grd->nzc - 1; k++){ ids->rhoc[i][j][k] = .125 * (ids->rhon[i][j][k] + ids->rhon[i + 1][j][k] + ids->rhon[i][j + 1][k] + ids->rhon[i][j][k + 1] + ids->rhon[i + 1][j + 1][k]+ ids->rhon[i + 1][j][k + 1] + ids->rhon[i][j + 1][k + 1] + ids->rhon[i + 1][j + 1][k + 1]); } }
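Worth flagging in both copies of this file: cudaFree and hipFree take the device pointer value itself, while interp_dens_species_deallocate_gpu passes &(ids_gpu->rhon_flat) and so on, the addresses of host-side struct members, so the device buffers allocated in interp_dens_species_allocate_gpu are never actually released. A corrected sketch of the deallocation (the struct type comes from InterpDensSpecies.h; the error check is an illustrative addition):

#include <cstdio>
#include <cuda_runtime.h>
#include "InterpDensSpecies.h"

void interp_dens_species_deallocate_gpu_fixed(struct interpDensSpecies_a* ids_gpu)
{
    // Pass the pointer value, not its address.
    cudaError_t err = cudaFree(ids_gpu->rhon_flat);
    if (err != cudaSuccess) printf("cudaFree failed: %s\n", cudaGetErrorString(err));
    cudaFree(ids_gpu->rhoc_flat);
    cudaFree(ids_gpu->Jx_flat);
    cudaFree(ids_gpu->Jy_flat);
    cudaFree(ids_gpu->Jz_flat);
    cudaFree(ids_gpu->pxx_flat);
    cudaFree(ids_gpu->pxy_flat);
    cudaFree(ids_gpu->pxz_flat);
    cudaFree(ids_gpu->pyy_flat);
    cudaFree(ids_gpu->pyz_flat);
    cudaFree(ids_gpu->pzz_flat);
}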
2dec25a6be4ba6ada5ccafbeaa483261ad142798.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* part4.cu */ #include "part4_conf.h" #include "part4_cpu.h" #include "part4_kernel.cu" /* ====================== Main_CPU =========================== */ int main(int argc, char* argv[]) { int *a, *a_device, *b, *b_device; int *solution, *solution_device, *meilleure_solution; int *voisin, *voisin_device, *ij; int i, j, k, score, best_score; int n, temp, m, condition, nb_blocks, nb_solution; int seed = time(NULL); // donnes temporelles clock_t initial_time; /* Initial time in micro-seconds */ clock_t final_time; /* Final time in micro-seconds */ float cpu_time; /* Total time of the cpu in seconds */ float gpu_time; /* Total time of the gpu in seconds */ if (argc < 3) { printf("Please give a data file in argument 1 and the number of iterations in argument 2.\n"); exit(1); } loadInstances(argv[1],n,a,b); nb_solution = atoi(argv[2]); m = n*(n-1)/2; // taille du tableau voisin nb_blocks = m/NB_THREAD; // nombre de blocs if ( m % NB_THREAD != 0) nb_blocks++; // Allocations dynamiques solution = (int*) malloc(n*sizeof(int)); meilleure_solution = (int*) malloc(n*sizeof(int)); voisin = (int*) malloc(m*sizeof(int)); ij = (int*) malloc(3*sizeof(int)); hipMalloc( (void **) &a_device, n*n*sizeof(int) ); hipMalloc( (void **) &b_device, n*n*sizeof(int) ); hipMalloc( (void **) &solution_device, n*sizeof(int) ); hipMalloc( (void **) &voisin_device, m*sizeof(int) ); hipMemcpy( a_device, a, n*n*sizeof(int), hipMemcpyHostToDevice ); hipMemcpy( b_device, b, n*n*sizeof(int), hipMemcpyHostToDevice ); /* ================================================================== = = = Multistart CPU : = = = = on lance le hill-climbing nb_solution fois = = (rentr en ligne de commande) = = = ================================================================== */ /* ces instructions sont les mmes que dans pour le multistart du gpu donc je me permets de ne pas les commenter.*/ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); score = evaluation(a,b,solution,n); condition = 0; while ( condition == 0 ) { ij[0] = 1; // ij[0] = i ij[1] = 1; // ij(1] = j ij[2] = 0; // ij[2] = d (dcalage) for (i=0; i<n-1; i++) { for (j=i+1; j<n; j++) { temp = compute_delta_cpu(a, b, solution, i, j, n); if (temp < ij[2]) { ij[0] = i; ij[1] = j; ij[2] = temp; } } } if (ij[2] >= 0) condition = 1; temp = solution[ij[0]]; solution[ij[0]] = solution[ij[1]]; solution[ij[1]] = temp; score = score + ij[2]; } if ( (k == 0) || ( (k != 0) && (score < best_score) ) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); cpu_time = (final_time - initial_time)*1e-6; // affichage des rsultats finaux sur CPU printf("Le meilleur score trouv par les Hill-climbing avec le CPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'excution CPU : %f s\n\n", cpu_time); /* ================================================================== = = = Multistart GPU : = = = = on lance le hill-climbing nb_solution fois = = (rentr en ligne de commande) = = = ================================================================== */ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); // gnre une solution pi score = evaluation(a,b,solution,n); // value le score z(pi) condition = 0; // boolen conditionnel pour la boucle // Recherche d'une meilleure solution tant qu'un voisin en 
propose une while ( condition == 0 ) { ij[0] = 1; // ij[0] = i ij[1] = 1; // ij(1] = j ij[2] = 0; // ij[2] = d (dcalage) hipMemcpy( solution_device, solution, n*sizeof(int), hipMemcpyHostToDevice ); // le gpu calcule le dcalage d'un voisin hipLaunchKernelGGL(( main_gpu), dim3(nb_blocks), dim3(NB_THREAD), 0, 0, voisin_device, a_device, b_device, solution_device, n); hipMemcpy( voisin, voisin_device, m*sizeof(int), hipMemcpyDeviceToHost ); // le cpu dfinit les lments de solution permuter pour avoir un meilleur score min_tab(ij, voisin, m, n, condition); // on permute les lments trouvs temp = solution[ij[0]]; solution[ij[0]] = solution[ij[1]]; solution[ij[1]] = temp; // on calcule le nouveau score score = score + ij[2]; } // initialisation de la meilleure solution et du meilleur score l'tape k=0 et cre la meilleure solution et le meilleur score si il en a un aprs if ( (k == 0) || ( (k != 0) && (score < best_score) ) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); gpu_time = (final_time - initial_time)*1e-6; printf("Le meilleur score trouv par les Hill-climbing avec le GPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'excution GPU : %f s\n\n", gpu_time); /* ====================================================== = = = fin de ce merveilleux programme = = = ====================================================== */ // dsallocation des tableaux hipFree(a_device); hipFree(b_device); hipFree(solution_device); hipFree(voisin_device); free(a); free(b); free(ij); free(solution); free(voisin); free(meilleure_solution); }
2dec25a6be4ba6ada5ccafbeaa483261ad142798.cu
/* part4.cu */ #include "part4_conf.h" #include "part4_cpu.h" #include "part4_kernel.cu" /* ====================== Main_CPU =========================== */ int main(int argc, char* argv[]) { int *a, *a_device, *b, *b_device; int *solution, *solution_device, *meilleure_solution; int *voisin, *voisin_device, *ij; int i, j, k, score, best_score; int n, temp, m, condition, nb_blocks, nb_solution; int seed = time(NULL); // données temporelles clock_t initial_time; /* Initial time in micro-seconds */ clock_t final_time; /* Final time in micro-seconds */ float cpu_time; /* Total time of the cpu in seconds */ float gpu_time; /* Total time of the gpu in seconds */ if (argc < 3) { printf("Please give a data file in argument 1 and the number of iterations in argument 2.\n"); exit(1); } loadInstances(argv[1],n,a,b); nb_solution = atoi(argv[2]); m = n*(n-1)/2; // taille du tableau voisin nb_blocks = m/NB_THREAD; // nombre de blocs if ( m % NB_THREAD != 0) nb_blocks++; // Allocations dynamiques solution = (int*) malloc(n*sizeof(int)); meilleure_solution = (int*) malloc(n*sizeof(int)); voisin = (int*) malloc(m*sizeof(int)); ij = (int*) malloc(3*sizeof(int)); cudaMalloc( (void **) &a_device, n*n*sizeof(int) ); cudaMalloc( (void **) &b_device, n*n*sizeof(int) ); cudaMalloc( (void **) &solution_device, n*sizeof(int) ); cudaMalloc( (void **) &voisin_device, m*sizeof(int) ); cudaMemcpy( a_device, a, n*n*sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( b_device, b, n*n*sizeof(int), cudaMemcpyHostToDevice ); /* ================================================================== = = = Multistart CPU : = = = = on lance le hill-climbing nb_solution fois = = (rentré en ligne de commande) = = = ================================================================== */ /* ces instructions sont les mêmes que dans pour le multistart du gpu donc je me permets de ne pas les commenter.*/ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); score = evaluation(a,b,solution,n); condition = 0; while ( condition == 0 ) { ij[0] = 1; // ij[0] = i ij[1] = 1; // ij(1] = j ij[2] = 0; // ij[2] = d (décalage) for (i=0; i<n-1; i++) { for (j=i+1; j<n; j++) { temp = compute_delta_cpu(a, b, solution, i, j, n); if (temp < ij[2]) { ij[0] = i; ij[1] = j; ij[2] = temp; } } } if (ij[2] >= 0) condition = 1; temp = solution[ij[0]]; solution[ij[0]] = solution[ij[1]]; solution[ij[1]] = temp; score = score + ij[2]; } if ( (k == 0) || ( (k != 0) && (score < best_score) ) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); cpu_time = (final_time - initial_time)*1e-6; // affichage des résultats finaux sur CPU printf("Le meilleur score trouvé par les Hill-climbing avec le CPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'exécution CPU : %f s\n\n", cpu_time); /* ================================================================== = = = Multistart GPU : = = = = on lance le hill-climbing nb_solution fois = = (rentré en ligne de commande) = = = ================================================================== */ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); // génère une solution pi score = evaluation(a,b,solution,n); // évalue le score z(pi) condition = 0; // booléen conditionnel pour la boucle // Recherche d'une meilleure solution tant qu'un voisin en propose une while ( condition == 0 ) { ij[0] = 1; // ij[0] = i ij[1] = 
1; // ij(1] = j ij[2] = 0; // ij[2] = d (décalage) cudaMemcpy( solution_device, solution, n*sizeof(int), cudaMemcpyHostToDevice ); // le gpu calcule le décalage d'un voisin main_gpu<<<nb_blocks, NB_THREAD>>>(voisin_device, a_device, b_device, solution_device, n); cudaMemcpy( voisin, voisin_device, m*sizeof(int), cudaMemcpyDeviceToHost ); // le cpu définit les éléments de solution à permuter pour avoir un meilleur score min_tab(ij, voisin, m, n, condition); // on permute les éléments trouvés temp = solution[ij[0]]; solution[ij[0]] = solution[ij[1]]; solution[ij[1]] = temp; // on calcule le nouveau score score = score + ij[2]; } // initialisation de la meilleure solution et du meilleur score à l'étape k=0 et crée la meilleure solution et le meilleur score si il en a un après if ( (k == 0) || ( (k != 0) && (score < best_score) ) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); gpu_time = (final_time - initial_time)*1e-6; printf("Le meilleur score trouvé par les Hill-climbing avec le GPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'exécution GPU : %f s\n\n", gpu_time); /* ====================================================== = = = fin de ce merveilleux programme = = = ====================================================== */ // désallocation des tableaux cudaFree(a_device); cudaFree(b_device); cudaFree(solution_device); cudaFree(voisin_device); free(a); free(b); free(ij); free(solution); free(voisin); free(meilleure_solution); }
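In both copies above the GPU multistart sizes the neighbourhood as m = n*(n-1)/2, one voisin entry per swap of two positions of the permutation, and delegates the delta computation to main_gpu from part4_kernel.cu, which is not included in this file. Purely as an illustration of how such a kernel can decode its flat thread index into a swap pair (i, j); this is an assumption about the missing kernel, not its actual code:

__device__ void flat_to_pair(int k, int n, int *i, int *j)
{
    // Row r of the strict upper triangle holds the n-1-r pairs (r, r+1) ... (r, n-1).
    int row = 0, len = n - 1;
    while (k >= len) { k -= len; row++; len--; }
    *i = row;
    *j = row + 1 + k;
}

__global__ void neighbour_delta_demo(int *voisin, int n, int m)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= m) return;
    int i, j;
    flat_to_pair(k, n, &i, &j);
    // A real kernel would evaluate the score change of swapping
    // solution[i] and solution[j] here and store it in voisin[k].
    voisin[k] = i + j;   // placeholder value so the sketch is self-contained
}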
058f4f454feb9b22fdace999ee39617ac598315e.hip
// !!! This is a file automatically generated by hipify!!! /* #include <cmath> #include <cstdio> #include <hip/hip_runtime.h> #include "dock.h" #include "gpu.cuh" */ /* #define expf(a) (a) #define powf(a,b) (a+b) #define logf(a) (a) #define sqrtf(a) (a) */ #include <assert.h> __device__ void CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt) { // reduce all points on the X-Y plate __shared__ float evdw[TperB]; // e[0] __shared__ float eele[TperB]; // e[1] __shared__ float epmf[TperB]; // e[2] __shared__ float epsp[TperB]; // e[3] __shared__ float ehdb[TperB]; // e[4] // reduce through only x axis __shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ??????? __shared__ float a_sz[BDy][BDx]; // ??????? __shared__ float ehpc[BDy]; // e[5] __shared__ float ekde[BDy]; // e[6] __shared__ float elhm[BDy]; // e[7] evdw[bidx] = 0.0f; eele[bidx] = 0.0f; epmf[bidx] = 0.0f; epsp[bidx] = 0.0f; ehdb[bidx] = 0.0f; if (bidx < BDy) { ehpc[bidx] = 0.0f; ekde[bidx] = 0.0f; elhm[bidx] = 0.0f; } __syncthreads (); // lig loop, ~30 for (int i = 0; i < lna_dc; i += blockDim.y) { a_val[threadIdx.y][threadIdx.x] = 0.0f; const int l = i + threadIdx.y; if (l < lna_dc) { const int lig_t = mylig->t[l]; // prt loop, ~300 for (int j = 0; j < pnp_dc; j += blockDim.x) { const int p = j + threadIdx.x; if (p < pnp_dc) { const int prt_t = myprt->t[p]; const float dx = mylig->coord_new.x[l] - myprt->x[p]; const float dy = mylig->coord_new.y[l] - myprt->y[p]; const float dz = mylig->coord_new.z[l] - myprt->z[p]; const float dst_pow2 = dx * dx + dy * dy + dz * dz; const float dst_pow4 = dst_pow2 * dst_pow2; const float dst = sqrtf (dst_pow2); /* hydrophobic potential */ if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) { a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] * (1.0f - (3.5f / 81.0f * dst_pow2 - 4.5f / 81.0f / 81.0f * dst_pow4 + 2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 - 0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4)); } /* L-J potential */ const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst); const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2); const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f; evdw[bidx] += (p1 - p2) / p4; /* electrostatic potential */ const float s1 = enepara_el1_dc * dst; float g1; if (s1 < 1) g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1; else g1 = 1.0f / s1; eele[bidx] += mylig->c[l] * myprt->ele[p] * g1; /* contact potential */ const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t]; epmf[bidx] += enepara_dc->pmf1[lig_t][prt_t] / (1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0)); /* pocket-specific potential */ // the senmatics do not match with the original program: // if (found psp[][]) // accumulate to epsp; // else // do nothing if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) { const int i1 = myprt->seq3r[p]; epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix } /* hydrogen bond potential */ const float hdb0 = enepara_dc->hdb0[lig_t][prt_t]; if (hdb0 > 0.1f) { const float hdb1 = enepara_dc->hdb1[lig_t][prt_t]; const float hdb3 = (dst - hdb0) * hdb1; ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3); } } // if (p < pnp_dc) } // prt loop } // if (l < lna_dc) /* hydrophobic restraits*/ SumReduction2D_d (a_val); // transpose may help improve the performance if (threadIdx.x == 0 && l < lna_dc) { const int lig_t = mylig->t[l]; const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / 
enepara_dc->hpl1[lig_t]; ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - enepara_dc->hpl2[lig_t]; } } // lig loop SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb); if (bidx == 0) { float eehpc = 0.0f; for (int i = 0; i < BDy; ++i) eehpc += ehpc[i]; ehpc[0] = eehpc; } #if 1 /* kde potential */ // lig loop, ~30 for (int i = 0; i < lna_dc; i += blockDim.y) { a_val[threadIdx.y][threadIdx.x] = 0.0f; a_sz[threadIdx.y][threadIdx.x] = 0.0f; const int l = i + threadIdx.y; if (l < lna_dc) { // kde loop, ~400 for (int j = 0; j < pnk_dc; j += blockDim.x) { const int k = j + threadIdx.x; if (k < pnk_dc) { if (mylig->t[l] == kde_dc->t[k]) { const float dx = mylig->coord_new.x[l] - kde_dc->x[k]; const float dy = mylig->coord_new.y[l] - kde_dc->y[k]; const float dz = mylig->coord_new.z[l] - kde_dc->z[k]; const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz; a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2); a_sz[threadIdx.y][threadIdx.x] += 1.0f; } } // if (k < pnk_dc) } // kde loop } // if (l < lna_dc) SumReduction2D_2_d (a_val, a_sz); if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f) ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]); } // lig loop __syncthreads (); if (bidx == 0) { float eekde = 0.0f; for (int i = 0; i < BDy; ++i) eekde += ekde[i]; eekde = eekde / enepara_kde3_dc; ekde[0] = eekde; } __syncthreads (); #endif #if 1 /* position restraints */ // lhm loop, ~11 for (int i = 0; i < pos_dc; i += blockDim.y) { a_val[threadIdx.y][threadIdx.x] = 0.0f; a_sz[threadIdx.y][threadIdx.x] = 0.0f; const int m = i + threadIdx.y; if (m < pos_dc) { // lig loop, ~30 for (int j = 0; j < lna_dc; j += blockDim.x) { const int l = j + threadIdx.x; if (l < lna_dc) { const int lig_n = mylig->n[l] + 1; if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) { const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n]; const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n]; const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n]; a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz; a_sz[threadIdx.y][threadIdx.x] += 1.0f; } } // if (l < lna_dc) } // lig loop } // if (m < pos_dc) SumReduction2D_2_d (a_val, a_sz); if (threadIdx.x == 0 && m < pos_dc) { elhm[threadIdx.y] += mcs_dc[m].tcc * sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]); } } // lhm loop __syncthreads (); if (bidx == 0) { float eelhm = 0.0f; for (int i = 0; i < BDy; ++i) eelhm += elhm[i]; // dropped the protection (if pos_dc != 0) eelhm = logf (eelhm / pos_dc); elhm[0] = eelhm; } __syncthreads (); #endif // energy edst e[8] __shared__ float edst; if (bidx == 0) { const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0]; const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1]; const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2]; edst = sqrtf (dx * dx + dy * dy + dz * dz); } __syncthreads (); if (bidx == 0) { evdw[0] = evdw[0] / lna_dc; eele[0] = eele[0] / lna_dc; epmf[0] = epmf[0] / lna_dc; epsp[0] = epsp[0] / lna_dc; ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f; // ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster ehpc[0] = ehpc[0] / lna_dc; ekde[0] = ekde[0] / lna_dc; // calculate normalized energy evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0]; eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1]; epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2]; ehpc[0] = enepara_dc->a_para[3] * ehpc[0] + enepara_dc->b_para[3]; ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + 
enepara_dc->b_para[4]; edst = enepara_dc->a_para[5] * edst + enepara_dc->b_para[5]; epsp[0] = enepara_dc->a_para[6] * epsp[0] + enepara_dc->b_para[6]; ekde[0] = enepara_dc->a_para[7] * ekde[0] + enepara_dc->b_para[7]; elhm[0] = enepara_dc->a_para[8] * elhm[0] + enepara_dc->b_para[8]; // printf("lhm: %f\n", elhm[0]); #if IS_BAYE == 1 // calculate conditional prob belonging to high decoy const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE); const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE); const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE); const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE); const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE); const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE); const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE); const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE); const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE); const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE); const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE); const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE); const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE); const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE); const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE); const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE); const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE); const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE); // assert (evdw_h != 0 ); // assert (evdw_l != 0 ); // assert (eele_h != 0 ); // assert (eele_l != 0 ); // assert (epmf_h != 0 ); // assert (epmf_l != 0 ); // assert (ehpc_h != 0 ); // assert (ehpc_l != 0 ); // assert (ehdb_h != 0 ); // assert (ehdb_l != 0 ); // assert (edst_h != 0 ); // assert (edst_l != 0 ); // assert (epsp_h != 0 ); // assert (epsp_l != 0 ); // assert (ekde_h != 0 ); // assert (ekde_l != 0 ); // assert (elhm_h != 0 ); // assert (elhm_l != 0 ); // calculate conditional prob const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h) + log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h); const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l) + log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l); const float etotal = prob_l - prob_h; #elif IS_BAYE == 0 // calculate the total energy using linear combination const float etotal = enepara_dc->w[0] * evdw[0] + enepara_dc->w[1] * eele[0] + enepara_dc->w[2] * epmf[0] + enepara_dc->w[3] * ehpc[0] + enepara_dc->w[4] * ehdb[0] + enepara_dc->w[5] * edst + enepara_dc->w[6] * epsp[0] + enepara_dc->w[7] * ekde[0] + enepara_dc->w[8] * elhm[0]; #endif float * e = &mylig->energy_new.e[0]; e[0] = evdw[0]; e[1] = eele[0]; e[2] = epmf[0]; e[3] = epsp[0]; e[4] = ehdb[0]; e[5] = ehpc[0]; e[6] = ekde[0]; e[7] = elhm[0]; e[8] = edst; e[9] = etotal; // e[9] = edst; } }
058f4f454feb9b22fdace999ee39617ac598315e.cu
/* #include <cmath> #include <cstdio> #include <cuda.h> #include "dock.h" #include "gpu.cuh" */ /* #define expf(a) (a) #define powf(a,b) (a+b) #define logf(a) (a) #define sqrtf(a) (a) */ #include <assert.h> __device__ void CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt) { // reduce all points on the X-Y plate __shared__ float evdw[TperB]; // e[0] __shared__ float eele[TperB]; // e[1] __shared__ float epmf[TperB]; // e[2] __shared__ float epsp[TperB]; // e[3] __shared__ float ehdb[TperB]; // e[4] // reduce through only x axis __shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ??????? __shared__ float a_sz[BDy][BDx]; // ??????? __shared__ float ehpc[BDy]; // e[5] __shared__ float ekde[BDy]; // e[6] __shared__ float elhm[BDy]; // e[7] evdw[bidx] = 0.0f; eele[bidx] = 0.0f; epmf[bidx] = 0.0f; epsp[bidx] = 0.0f; ehdb[bidx] = 0.0f; if (bidx < BDy) { ehpc[bidx] = 0.0f; ekde[bidx] = 0.0f; elhm[bidx] = 0.0f; } __syncthreads (); // lig loop, ~30 for (int i = 0; i < lna_dc; i += blockDim.y) { a_val[threadIdx.y][threadIdx.x] = 0.0f; const int l = i + threadIdx.y; if (l < lna_dc) { const int lig_t = mylig->t[l]; // prt loop, ~300 for (int j = 0; j < pnp_dc; j += blockDim.x) { const int p = j + threadIdx.x; if (p < pnp_dc) { const int prt_t = myprt->t[p]; const float dx = mylig->coord_new.x[l] - myprt->x[p]; const float dy = mylig->coord_new.y[l] - myprt->y[p]; const float dz = mylig->coord_new.z[l] - myprt->z[p]; const float dst_pow2 = dx * dx + dy * dy + dz * dz; const float dst_pow4 = dst_pow2 * dst_pow2; const float dst = sqrtf (dst_pow2); /* hydrophobic potential */ if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) { a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] * (1.0f - (3.5f / 81.0f * dst_pow2 - 4.5f / 81.0f / 81.0f * dst_pow4 + 2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 - 0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4)); } /* L-J potential */ const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst); const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2); const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f; evdw[bidx] += (p1 - p2) / p4; /* electrostatic potential */ const float s1 = enepara_el1_dc * dst; float g1; if (s1 < 1) g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1; else g1 = 1.0f / s1; eele[bidx] += mylig->c[l] * myprt->ele[p] * g1; /* contact potential */ const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t]; epmf[bidx] += enepara_dc->pmf1[lig_t][prt_t] / (1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0)); /* pocket-specific potential */ // the senmatics do not match with the original program: // if (found psp[][]) // accumulate to epsp; // else // do nothing if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) { const int i1 = myprt->seq3r[p]; epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix } /* hydrogen bond potential */ const float hdb0 = enepara_dc->hdb0[lig_t][prt_t]; if (hdb0 > 0.1f) { const float hdb1 = enepara_dc->hdb1[lig_t][prt_t]; const float hdb3 = (dst - hdb0) * hdb1; ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3); } } // if (p < pnp_dc) } // prt loop } // if (l < lna_dc) /* hydrophobic restraits*/ SumReduction2D_d (a_val); // transpose may help improve the performance if (threadIdx.x == 0 && l < lna_dc) { const int lig_t = mylig->t[l]; const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / enepara_dc->hpl1[lig_t]; ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - 
enepara_dc->hpl2[lig_t]; } } // lig loop SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb); if (bidx == 0) { float eehpc = 0.0f; for (int i = 0; i < BDy; ++i) eehpc += ehpc[i]; ehpc[0] = eehpc; } #if 1 /* kde potential */ // lig loop, ~30 for (int i = 0; i < lna_dc; i += blockDim.y) { a_val[threadIdx.y][threadIdx.x] = 0.0f; a_sz[threadIdx.y][threadIdx.x] = 0.0f; const int l = i + threadIdx.y; if (l < lna_dc) { // kde loop, ~400 for (int j = 0; j < pnk_dc; j += blockDim.x) { const int k = j + threadIdx.x; if (k < pnk_dc) { if (mylig->t[l] == kde_dc->t[k]) { const float dx = mylig->coord_new.x[l] - kde_dc->x[k]; const float dy = mylig->coord_new.y[l] - kde_dc->y[k]; const float dz = mylig->coord_new.z[l] - kde_dc->z[k]; const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz; a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2); a_sz[threadIdx.y][threadIdx.x] += 1.0f; } } // if (k < pnk_dc) } // kde loop } // if (l < lna_dc) SumReduction2D_2_d (a_val, a_sz); if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f) ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]); } // lig loop __syncthreads (); if (bidx == 0) { float eekde = 0.0f; for (int i = 0; i < BDy; ++i) eekde += ekde[i]; eekde = eekde / enepara_kde3_dc; ekde[0] = eekde; } __syncthreads (); #endif #if 1 /* position restraints */ // lhm loop, ~11 for (int i = 0; i < pos_dc; i += blockDim.y) { a_val[threadIdx.y][threadIdx.x] = 0.0f; a_sz[threadIdx.y][threadIdx.x] = 0.0f; const int m = i + threadIdx.y; if (m < pos_dc) { // lig loop, ~30 for (int j = 0; j < lna_dc; j += blockDim.x) { const int l = j + threadIdx.x; if (l < lna_dc) { const int lig_n = mylig->n[l] + 1; if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) { const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n]; const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n]; const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n]; a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz; a_sz[threadIdx.y][threadIdx.x] += 1.0f; } } // if (l < lna_dc) } // lig loop } // if (m < pos_dc) SumReduction2D_2_d (a_val, a_sz); if (threadIdx.x == 0 && m < pos_dc) { elhm[threadIdx.y] += mcs_dc[m].tcc * sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]); } } // lhm loop __syncthreads (); if (bidx == 0) { float eelhm = 0.0f; for (int i = 0; i < BDy; ++i) eelhm += elhm[i]; // dropped the protection (if pos_dc != 0) eelhm = logf (eelhm / pos_dc); elhm[0] = eelhm; } __syncthreads (); #endif // energy edst e[8] __shared__ float edst; if (bidx == 0) { const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0]; const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1]; const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2]; edst = sqrtf (dx * dx + dy * dy + dz * dz); } __syncthreads (); if (bidx == 0) { evdw[0] = evdw[0] / lna_dc; eele[0] = eele[0] / lna_dc; epmf[0] = epmf[0] / lna_dc; epsp[0] = epsp[0] / lna_dc; ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f; // ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster ehpc[0] = ehpc[0] / lna_dc; ekde[0] = ekde[0] / lna_dc; // calculate normalized energy evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0]; eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1]; epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2]; ehpc[0] = enepara_dc->a_para[3] * ehpc[0] + enepara_dc->b_para[3]; ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + enepara_dc->b_para[4]; edst = enepara_dc->a_para[5] * edst + enepara_dc->b_para[5]; 
epsp[0] = enepara_dc->a_para[6] * epsp[0] + enepara_dc->b_para[6]; ekde[0] = enepara_dc->a_para[7] * ekde[0] + enepara_dc->b_para[7]; elhm[0] = enepara_dc->a_para[8] * elhm[0] + enepara_dc->b_para[8]; // printf("lhm: %f\n", elhm[0]); #if IS_BAYE == 1 // calculate conditional prob belonging to high decoy const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE); const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE); const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE); const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE); const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE); const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE); const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE); const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE); const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE); const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE); const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE); const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE); const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE); const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE); const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE); const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE); const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE); const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE); // assert (evdw_h != 0 ); // assert (evdw_l != 0 ); // assert (eele_h != 0 ); // assert (eele_l != 0 ); // assert (epmf_h != 0 ); // assert (epmf_l != 0 ); // assert (ehpc_h != 0 ); // assert (ehpc_l != 0 ); // assert (ehdb_h != 0 ); // assert (ehdb_l != 0 ); // assert (edst_h != 0 ); // assert (edst_l != 0 ); // assert (epsp_h != 0 ); // assert (epsp_l != 0 ); // assert (ekde_h != 0 ); // assert (ekde_l != 0 ); // assert (elhm_h != 0 ); // assert (elhm_l != 0 ); // calculate conditional prob const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h) + log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h); const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l) + log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l); const float etotal = prob_l - prob_h; #elif IS_BAYE == 0 // calculate the total energy using linear combination const float etotal = enepara_dc->w[0] * evdw[0] + enepara_dc->w[1] * eele[0] + enepara_dc->w[2] * epmf[0] + enepara_dc->w[3] * ehpc[0] + enepara_dc->w[4] * ehdb[0] + enepara_dc->w[5] * edst + enepara_dc->w[6] * epsp[0] + enepara_dc->w[7] * ekde[0] + enepara_dc->w[8] * elhm[0]; #endif float * e = &mylig->energy_new.e[0]; e[0] = evdw[0]; e[1] = eele[0]; e[2] = epmf[0]; e[3] = epsp[0]; e[4] = ehdb[0]; e[5] = ehpc[0]; e[6] = ekde[0]; e[7] = elhm[0]; e[8] = edst; e[9] = etotal; // e[9] = edst; } }
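CalcEnergy_d relies on SumReduction2D_d, SumReduction2D_2_d and SumReduction1D_5_d, which are declared elsewhere (presumably in gpu.cuh) and are not shown here. For orientation only, a minimal shared-memory block reduction of the kind those helpers appear to implement; this is an assumption, not the project's code:

// Sums `val` over all threads of one block; every thread must reach this call.
// TPB is the block size and must be a power of two (e.g. TperB above).
template <int TPB>
__device__ float block_sum_demo(float val)
{
    __shared__ float buf[TPB];
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;  // linear thread index
    buf[tid] = val;
    __syncthreads();
    for (int stride = TPB / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            buf[tid] += buf[tid + stride];
        __syncthreads();
    }
    return buf[0];  // every thread sees the block-wide sum after the last barrier
}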
13fdbff2d7eeba2bb59b699bfb6e9e5072601f07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmergebicgstab.cu, normal z -> c, Thu Oct 8 23:05:47 2020 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from bicgstab into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cbicgstab_1_kernel( int num_rows, int num_cols, magmaFloatComplex beta, magmaFloatComplex omega, magmaFloatComplex *r, magmaFloatComplex *v, magmaFloatComplex *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ p[ i+j*num_rows ] = r[ i+j*num_rows ] + beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] ); } } } /** Purpose ------- Mergels multiple operations into one kernel: p = r + beta * ( p - omega * v ) @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta magmaFloatComplex scalar @param[in] omega magmaFloatComplex scalar @param[in] r magmaFloatComplex_ptr vector @param[in] v magmaFloatComplex_ptr vector @param[in,out] p magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_1( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex beta, magmaFloatComplex omega, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cbicgstab_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, omega, r, v, p ); return MAGMA_SUCCESS; } __global__ void magma_cbicgstab_2_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr s ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: s = r - alpha v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] r magmaFloatComplex_ptr vector @param[in] v magmaFloatComplex_ptr vector @param[in,out] s magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr s, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cbicgstab_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, r, v, s ); return MAGMA_SUCCESS; } __global__ void magma_cbicgstab_3_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex *p, magmaFloatComplex *s, magmaFloatComplex *t, magmaFloatComplex *x, magmaFloatComplex *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ magmaFloatComplex tmp = s[ i+j*num_rows ]; x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * p[ i+j*num_rows ] + omega * tmp; r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * p + omega * s r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] omega magmaFloatComplex scalar @param[in] p magmaFloatComplex_ptr vector @param[in] s magmaFloatComplex_ptr vector @param[in] t magmaFloatComplex_ptr vector @param[in,out] x magmaFloatComplex_ptr vector @param[in,out] r magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_3( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex_ptr p, magmaFloatComplex_ptr s, magmaFloatComplex_ptr t, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cbicgstab_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, p, s, t, x, r ); return MAGMA_SUCCESS; } __global__ void magma_cbicgstab_4_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex *y, magmaFloatComplex *z, magmaFloatComplex *s, magmaFloatComplex *t, magmaFloatComplex *x, magmaFloatComplex *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ]; r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * y + omega * z r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] omega magmaFloatComplex scalar @param[in] y magmaFloatComplex_ptr vector @param[in] z magmaFloatComplex_ptr vector @param[in] s magmaFloatComplex_ptr vector @param[in] t magmaFloatComplex_ptr vector @param[in,out] x magmaFloatComplex_ptr vector @param[in,out] r magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_4( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex_ptr y, magmaFloatComplex_ptr z, magmaFloatComplex_ptr s, magmaFloatComplex_ptr t, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cbicgstab_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, y, z, s, t, x, r ); return MAGMA_SUCCESS; }
13fdbff2d7eeba2bb59b699bfb6e9e5072601f07.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmergebicgstab.cu, normal z -> c, Thu Oct 8 23:05:47 2020 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from bicgstab into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cbicgstab_1_kernel( int num_rows, int num_cols, magmaFloatComplex beta, magmaFloatComplex omega, magmaFloatComplex *r, magmaFloatComplex *v, magmaFloatComplex *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ p[ i+j*num_rows ] = r[ i+j*num_rows ] + beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] ); } } } /** Purpose ------- Mergels multiple operations into one kernel: p = r + beta * ( p - omega * v ) @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta magmaFloatComplex scalar @param[in] omega magmaFloatComplex scalar @param[in] r magmaFloatComplex_ptr vector @param[in] v magmaFloatComplex_ptr vector @param[in,out] p magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_1( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex beta, magmaFloatComplex omega, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cbicgstab_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, omega, r, v, p ); return MAGMA_SUCCESS; } __global__ void magma_cbicgstab_2_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr s ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: s = r - alpha v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] r magmaFloatComplex_ptr vector @param[in] v magmaFloatComplex_ptr vector @param[in,out] s magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr s, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cbicgstab_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, r, v, s ); return MAGMA_SUCCESS; } __global__ void magma_cbicgstab_3_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex *p, magmaFloatComplex *s, magmaFloatComplex *t, magmaFloatComplex *x, magmaFloatComplex *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ magmaFloatComplex tmp = s[ i+j*num_rows ]; x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * p[ i+j*num_rows ] + omega * tmp; r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * p + omega * s r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] omega magmaFloatComplex scalar @param[in] p magmaFloatComplex_ptr vector @param[in] s magmaFloatComplex_ptr vector @param[in] t magmaFloatComplex_ptr vector @param[in,out] x magmaFloatComplex_ptr vector @param[in,out] r magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_3( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex_ptr p, magmaFloatComplex_ptr s, magmaFloatComplex_ptr t, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cbicgstab_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, p, s, t, x, r ); return MAGMA_SUCCESS; } __global__ void magma_cbicgstab_4_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex *y, magmaFloatComplex *z, magmaFloatComplex *s, magmaFloatComplex *t, magmaFloatComplex *x, magmaFloatComplex *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ]; r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * y + omega * z r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] omega magmaFloatComplex scalar @param[in] y magmaFloatComplex_ptr vector @param[in] z magmaFloatComplex_ptr vector @param[in] s magmaFloatComplex_ptr vector @param[in] t magmaFloatComplex_ptr vector @param[in,out] x magmaFloatComplex_ptr vector @param[in,out] r magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbicgstab_4( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex omega, magmaFloatComplex_ptr y, magmaFloatComplex_ptr z, magmaFloatComplex_ptr s, magmaFloatComplex_ptr t, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cbicgstab_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, y, z, s, t, x, r ); return MAGMA_SUCCESS; }
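The .hip and .cu versions of this MAGMA file differ only in launch syntax: the CUDA triple-chevron form becomes hipLaunchKernelGGL(kernel, grid, block, shared-bytes, stream, args...). A minimal self-contained sketch of that mapping follows; the axpy kernel and sizes are hypothetical stand-ins, not part of the MAGMA sources.

#include <hip/hip_runtime.h>
#include <cstdio>

#define BLOCK_SIZE 512   /* same convention as the MAGMA file above */

__global__ void axpy_kernel(int n, float a, const float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] += a * x[i];
    }
}

int main()
{
    const int n = 1000;
    float *x, *y;
    hipMalloc(&x, n * sizeof(float));
    hipMalloc(&y, n * sizeof(float));
    hipMemset(x, 0, n * sizeof(float));
    hipMemset(y, 0, n * sizeof(float));

    dim3 Bs(BLOCK_SIZE);
    dim3 Gs((n + BLOCK_SIZE - 1) / BLOCK_SIZE);   /* ceil-div, like magma_ceildiv */

    /* CUDA (.cu) form:  axpy_kernel<<< Gs, Bs, 0, 0 >>>( n, 2.0f, x, y );        */
    /* HIP (.hip) form:  grid, block, shared-mem bytes, stream, then kernel args: */
    hipLaunchKernelGGL(axpy_kernel, Gs, Bs, 0, 0, n, 2.0f, x, y);
    hipDeviceSynchronize();

    hipFree(x);
    hipFree(y);
    printf("launched %u blocks of %u threads\n", Gs.x, Bs.x);
    return 0;
}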
d308a1bcda920e232a7a6cc374a87f11c079181c.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <math.h> #include <time.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> /* if the lenght of vector is large, set this to zero */ #define PRINT_VECTOR_CONTENT 0 /* for measuring time */ double getUnixTime(void){ struct timespec tv; if(clock_gettime(CLOCK_REALTIME, &tv) != 0) return 0; return (((double) tv.tv_sec) + (double) (tv.tv_nsec / 1000000000.0)); } #ifdef USE_ROCM /* CUDA stuff: */ #include "hip/hip_runtime.h" /* cuda error check */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"\n\x1B[31mCUDA error:\x1B[0m %s %s \x1B[33m%d\x1B[0m\n\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /* kernel called in this example */ __global__ void mykernel(double *x_arr, int mysize, int Tstart){ int i = blockIdx.x*blockDim.x + threadIdx.x; /* compute my id */ if(i<mysize){ /* maybe we call more than mysize kernels */ x_arr[i] = Tstart+i; } /* i >= mysize then relax and do nothing */ } /* print kernel, call only one (!) */ /* in practical applications, this way is a real bottleneck, array should be transfered to CPU and printed there */ /* anyway, in this sample I want to print only small arrays, so who cares.. */ __global__ void printkernel(double *x_arr, int mysize){ printf(" ["); for(int i=0;i<mysize;i++){ printf(" %f", x_arr[i]); if(i < mysize-1) printf(","); } printf(" ]\n"); } __global__ void printkernel_id(double *x_arr, int mysize, int id, int rank){ printf("%d_GPU: from cuda x_arr[%d]: ", rank, id); if(id < mysize){ printf("%f", x_arr[id]); } else { printf("out of range"); } printf("\n"); } /* end of CUDA stuff */ #endif int main( int argc, char *argv[] ) { /* load console arguments */ int Tlocal; int nmb_of_tests; if(argc != 3){ std::cout << "call with: ./sample_weak Tlocal nmb_of_tests" << std::endl; return 1; } else { Tlocal = atoi(argv[1]); nmb_of_tests = atoi(argv[2]); } /* MPI stuff */ MPI_Init(NULL, NULL); /* Initialize the MPI environment */ int MPIrank, MPIsize; MPI_Comm_rank(MPI_COMM_WORLD, &MPIrank); MPI_Comm_size(MPI_COMM_WORLD, &MPIsize); /* compute T */ int T = MPIsize*Tlocal; int Tstart = MPIrank*Tlocal; #ifdef USE_ROCM /* CUDA stuff */ gpuErrchk( hipDeviceReset() ); #endif /* print problem info */ if(MPIrank == 0){ /* only master prints */ std::cout << "Benchmark started" << std::endl; std::cout << " T = " << T << std::endl; std::cout << " Tlocal = " << Tlocal << std::endl; std::cout << " nmb_of_tests = " << nmb_of_tests << std::endl; std::cout << " MPIsize = " << MPIsize << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); std::cout << " * MPIrank = " << MPIrank << std::endl; /* everybody say hello */ MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); double timer; double timer1; double timer2; double *x_arr; /* my array on GPU */ #ifdef USE_ROCM /* ------- CUDA version ------- */ int minGridSize, blockSize, gridSize; /* for optimal call */ /* allocate array */ timer = getUnixTime(); /* start to measure time */ gpuErrchk( hipMalloc(&x_arr, sizeof(double)*Tlocal) ); if(MPIrank == 0){ /* only master prints */ std::cout << " - allocation: " << getUnixTime() - timer << "s" << std::endl; } /* warm up */ hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, x_arr,1,1); gpuErrchk( hipDeviceSynchronize() ); MPI_Barrier( MPI_COMM_WORLD ); /* compute 
optimal parameters of the call */ gpuErrchk( hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,mykernel, 0, 0) ); gridSize = (Tlocal + blockSize - 1)/ blockSize; timer = getUnixTime(); for(int k=0;k<nmb_of_tests;k++){ hipLaunchKernelGGL(( mykernel), dim3(gridSize), dim3(blockSize), 0, 0, x_arr, Tlocal, Tstart); gpuErrchk( hipDeviceSynchronize() ); MPI_Barrier( MPI_COMM_WORLD ); } hipLaunchKernelGGL(( printkernel_id), dim3(1),dim3(1), 0, 0, x_arr, Tlocal, 0, MPIrank); gpuErrchk( hipDeviceSynchronize() ); hipLaunchKernelGGL(( printkernel_id), dim3(1),dim3(1), 0, 0, x_arr, Tlocal, 1000, MPIrank); gpuErrchk( hipDeviceSynchronize() ); hipLaunchKernelGGL(( printkernel_id), dim3(1),dim3(1), 0, 0, x_arr, Tlocal, Tlocal-1, MPIrank); gpuErrchk( hipDeviceSynchronize() ); timer1 = getUnixTime() - timer; for(int k=0;k<MPIsize;k++){ if(k==MPIrank){ /* my turn - I am printing */ std::cout << MPIrank << ". ( gridSize = " << gridSize << ", blockSize = " << blockSize << " )" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } /* print array */ if(PRINT_VECTOR_CONTENT){ timer = getUnixTime(); for(int k=0;k<MPIsize;k++){ if(k==MPIrank){ /* my turn - I am printing */ std::cout << k << ".CPU:" << std::endl; hipLaunchKernelGGL(( printkernel), dim3(1),dim3(1), 0, 0, x_arr,Tlocal); gpuErrchk( hipDeviceSynchronize() ); } MPI_Barrier( MPI_COMM_WORLD ); } if(MPIrank == 0){ /* only master prints */ std::cout << " - printed in: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } /* destroy array */ timer = getUnixTime(); gpuErrchk( hipFree(x_arr) ); MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << " - destruction: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); #else /* ------- SEQUENTIAL version ------- */ /* allocate array */ timer = getUnixTime(); x_arr = new double[Tlocal]; MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << " - allocation: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); /* fill array */ timer = getUnixTime(); for(int k=0;k<nmb_of_tests;k++){ for(int i=0;i<Tlocal;i++){ x_arr[i] = Tstart+i; } MPI_Barrier( MPI_COMM_WORLD ); } timer2 = getUnixTime() - timer; MPI_Barrier( MPI_COMM_WORLD ); /* print array */ if(PRINT_VECTOR_CONTENT){ timer = getUnixTime(); for(int k=0;k<MPIsize;k++){ if(k==MPIrank){ /* my turn - I am printing */ std::cout << k << ".CPU:" << std::endl; std::cout << " ["; for(int i=0;i<Tlocal;i++){ std::cout << " " << x_arr[i]; if(i < Tlocal-1) std::cout << ","; } std::cout << " ]" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } if(MPIrank == 0){ /* only master prints */ std::cout << " - printed in: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } MPI_Barrier( MPI_COMM_WORLD ); /* destroy array */ timer = getUnixTime(); delete [] x_arr; MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << " - destruction: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); #endif /* final print of timers */ if(MPIrank==0){ /* only master prints */ std::cout << std::endl; std::cout << "---- TIMERS ----" << std::endl; #ifdef USE_ROCM std::cout << " GPU = " << timer1 << "s" << std::endl; #else std::cout << " CPU = " << timer2 << "s" << std::endl; #endif std::cout << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); /* MPI stuff */ MPI_Finalize(); /* Finalize the MPI environment. */ return 0; }
d308a1bcda920e232a7a6cc374a87f11c079181c.cu
#include <iostream> #include <math.h> #include <time.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> /* if the lenght of vector is large, set this to zero */ #define PRINT_VECTOR_CONTENT 0 /* for measuring time */ double getUnixTime(void){ struct timespec tv; if(clock_gettime(CLOCK_REALTIME, &tv) != 0) return 0; return (((double) tv.tv_sec) + (double) (tv.tv_nsec / 1000000000.0)); } #ifdef USE_CUDA /* CUDA stuff: */ #include "cuda.h" /* cuda error check */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"\n\x1B[31mCUDA error:\x1B[0m %s %s \x1B[33m%d\x1B[0m\n\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /* kernel called in this example */ __global__ void mykernel(double *x_arr, int mysize, int Tstart){ int i = blockIdx.x*blockDim.x + threadIdx.x; /* compute my id */ if(i<mysize){ /* maybe we call more than mysize kernels */ x_arr[i] = Tstart+i; } /* i >= mysize then relax and do nothing */ } /* print kernel, call only one (!) */ /* in practical applications, this way is a real bottleneck, array should be transfered to CPU and printed there */ /* anyway, in this sample I want to print only small arrays, so who cares.. */ __global__ void printkernel(double *x_arr, int mysize){ printf(" ["); for(int i=0;i<mysize;i++){ printf(" %f", x_arr[i]); if(i < mysize-1) printf(","); } printf(" ]\n"); } __global__ void printkernel_id(double *x_arr, int mysize, int id, int rank){ printf("%d_GPU: from cuda x_arr[%d]: ", rank, id); if(id < mysize){ printf("%f", x_arr[id]); } else { printf("out of range"); } printf("\n"); } /* end of CUDA stuff */ #endif int main( int argc, char *argv[] ) { /* load console arguments */ int Tlocal; int nmb_of_tests; if(argc != 3){ std::cout << "call with: ./sample_weak Tlocal nmb_of_tests" << std::endl; return 1; } else { Tlocal = atoi(argv[1]); nmb_of_tests = atoi(argv[2]); } /* MPI stuff */ MPI_Init(NULL, NULL); /* Initialize the MPI environment */ int MPIrank, MPIsize; MPI_Comm_rank(MPI_COMM_WORLD, &MPIrank); MPI_Comm_size(MPI_COMM_WORLD, &MPIsize); /* compute T */ int T = MPIsize*Tlocal; int Tstart = MPIrank*Tlocal; #ifdef USE_CUDA /* CUDA stuff */ gpuErrchk( cudaDeviceReset() ); #endif /* print problem info */ if(MPIrank == 0){ /* only master prints */ std::cout << "Benchmark started" << std::endl; std::cout << " T = " << T << std::endl; std::cout << " Tlocal = " << Tlocal << std::endl; std::cout << " nmb_of_tests = " << nmb_of_tests << std::endl; std::cout << " MPIsize = " << MPIsize << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); std::cout << " * MPIrank = " << MPIrank << std::endl; /* everybody say hello */ MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); double timer; double timer1; double timer2; double *x_arr; /* my array on GPU */ #ifdef USE_CUDA /* ------- CUDA version ------- */ int minGridSize, blockSize, gridSize; /* for optimal call */ /* allocate array */ timer = getUnixTime(); /* start to measure time */ gpuErrchk( cudaMalloc(&x_arr, sizeof(double)*Tlocal) ); if(MPIrank == 0){ /* only master prints */ std::cout << " - allocation: " << getUnixTime() - timer << "s" << std::endl; } /* warm up */ mykernel<<<1,1>>>(x_arr,1,1); gpuErrchk( cudaDeviceSynchronize() ); MPI_Barrier( MPI_COMM_WORLD ); /* compute optimal parameters of the call */ gpuErrchk( cudaOccupancyMaxPotentialBlockSize( &minGridSize, 
&blockSize,mykernel, 0, 0) ); gridSize = (Tlocal + blockSize - 1)/ blockSize; timer = getUnixTime(); for(int k=0;k<nmb_of_tests;k++){ mykernel<<<gridSize, blockSize>>>(x_arr, Tlocal, Tstart); gpuErrchk( cudaDeviceSynchronize() ); MPI_Barrier( MPI_COMM_WORLD ); } printkernel_id<<<1,1>>>(x_arr, Tlocal, 0, MPIrank); gpuErrchk( cudaDeviceSynchronize() ); printkernel_id<<<1,1>>>(x_arr, Tlocal, 1000, MPIrank); gpuErrchk( cudaDeviceSynchronize() ); printkernel_id<<<1,1>>>(x_arr, Tlocal, Tlocal-1, MPIrank); gpuErrchk( cudaDeviceSynchronize() ); timer1 = getUnixTime() - timer; for(int k=0;k<MPIsize;k++){ if(k==MPIrank){ /* my turn - I am printing */ std::cout << MPIrank << ". ( gridSize = " << gridSize << ", blockSize = " << blockSize << " )" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } /* print array */ if(PRINT_VECTOR_CONTENT){ timer = getUnixTime(); for(int k=0;k<MPIsize;k++){ if(k==MPIrank){ /* my turn - I am printing */ std::cout << k << ".CPU:" << std::endl; printkernel<<<1,1>>>(x_arr,Tlocal); gpuErrchk( cudaDeviceSynchronize() ); } MPI_Barrier( MPI_COMM_WORLD ); } if(MPIrank == 0){ /* only master prints */ std::cout << " - printed in: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } /* destroy array */ timer = getUnixTime(); gpuErrchk( cudaFree(x_arr) ); MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << " - destruction: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); #else /* ------- SEQUENTIAL version ------- */ /* allocate array */ timer = getUnixTime(); x_arr = new double[Tlocal]; MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << " - allocation: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); /* fill array */ timer = getUnixTime(); for(int k=0;k<nmb_of_tests;k++){ for(int i=0;i<Tlocal;i++){ x_arr[i] = Tstart+i; } MPI_Barrier( MPI_COMM_WORLD ); } timer2 = getUnixTime() - timer; MPI_Barrier( MPI_COMM_WORLD ); /* print array */ if(PRINT_VECTOR_CONTENT){ timer = getUnixTime(); for(int k=0;k<MPIsize;k++){ if(k==MPIrank){ /* my turn - I am printing */ std::cout << k << ".CPU:" << std::endl; std::cout << " ["; for(int i=0;i<Tlocal;i++){ std::cout << " " << x_arr[i]; if(i < Tlocal-1) std::cout << ","; } std::cout << " ]" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } if(MPIrank == 0){ /* only master prints */ std::cout << " - printed in: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); } MPI_Barrier( MPI_COMM_WORLD ); /* destroy array */ timer = getUnixTime(); delete [] x_arr; MPI_Barrier( MPI_COMM_WORLD ); if(MPIrank == 0){ /* only master prints */ std::cout << " - destruction: " << getUnixTime() - timer << "s" << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); #endif /* final print of timers */ if(MPIrank==0){ /* only master prints */ std::cout << std::endl; std::cout << "---- TIMERS ----" << std::endl; #ifdef USE_CUDA std::cout << " GPU = " << timer1 << "s" << std::endl; #else std::cout << " CPU = " << timer2 << "s" << std::endl; #endif std::cout << std::endl; } MPI_Barrier( MPI_COMM_WORLD ); /* MPI stuff */ MPI_Finalize(); /* Finalize the MPI environment. */ return 0; }
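This pair shows the straight runtime-API renames applied by hipify (cudaDeviceReset, cudaMalloc, cudaOccupancyMaxPotentialBlockSize, cudaDeviceSynchronize, and cudaFree to their hip* counterparts). A stripped-down sketch of the same call sequence follows, assuming a single GPU and omitting the MPI, timing, and gpuErrchk scaffolding of the original sample.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void fill(double *x, int n, int start)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   /* compute my id */
    if (i < n) x[i] = start + i;
}

int main()
{
    const int n = 1 << 20;
    double *x = NULL;

    hipDeviceReset();                                  /* cudaDeviceReset in the .cu file */
    hipMalloc(&x, n * sizeof(double));                 /* cudaMalloc                      */

    int minGridSize = 0, blockSize = 0;
    /* cudaOccupancyMaxPotentialBlockSize in the .cu file */
    hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, fill, 0, 0);
    int gridSize = (n + blockSize - 1) / blockSize;    /* same rounding as the sample above */

    hipLaunchKernelGGL(fill, dim3(gridSize), dim3(blockSize), 0, 0, x, n, 0);
    hipDeviceSynchronize();                            /* cudaDeviceSynchronize */

    hipFree(x);                                        /* cudaFree */
    printf("blockSize = %d, gridSize = %d\n", blockSize, gridSize);
    return 0;
}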
ebd1cfa166bc61e486af2f2f8d5052978c02e671.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Find BLANK and replace your own code. * And submit report why do you replace the blank that way. */ #include<stdlib.h> #include<iostream> #include<fstream> #include<vector> #include<string> #define TILE_WIDTH 32 /* set TILE_WIDTH 16 for the evaluation! */ #define MAXPOOL_INPUT_FILENAME "input.txt" #define A_FILENAME "a.txt" #define B_FILENAME "b.txt" #define C_FILENAME "c.txt" using namespace std; __global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) { // input : input_matrix address // output : output buffer address // input_size : width, height of input matrix // filter_size : filter_size of maxpolling // all input, output matrices are vectorized int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; // CHANGE float max = 0.0; for(int i = 0; i<3; i++){ for(int j = 0; j<3; j++){ int var = input[(filter_size * col) + (input_size * filter_size * row) + j + (input_size * i)]; if(var > max) max = var; } } output[col + row *(input_size/filter_size)] = max; // out of bound } __global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size){ // a, b, c : input matrix address // alpha, beta : input constant // output : output buffer address // input_size : width, height of input matrix // all input, output matrices are vectorized int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int row = by*blockDim.y + ty; int col = bx*blockDim.x + tx; //if(row>=input_size ||col>=input_size) { return; } // allocate 2D tiles in __shared__ memory __shared__ float s_a[TILE_WIDTH][TILE_WIDTH]; __shared__ float s_b[TILE_WIDTH][TILE_WIDTH]; float result = 0; // make sure you handle the case when the matrix sizes are not // multiple of TILE_WIDTH! // loop over the tiles of the input in phases int p = 0; for(p = 0; p <= input_size/TILE_WIDTH; ++p){ // CHANGE if(p*TILE_WIDTH + tx < input_size && row <input_size) s_a[ty][tx] = a[row * input_size + tx + p * TILE_WIDTH]; else s_a[ty][tx] = 0.0; if(p*TILE_WIDTH + ty < input_size && col <input_size) s_b[ty][tx] = b[col + input_size * ty + p * input_size * TILE_WIDTH]; else s_b[ty][tx] = 0.0; __syncthreads(); for(int i = 0; i < TILE_WIDTH; i++){ result += alpha * s_a[ty][i] * s_b[i][tx]; // result += alpha * s_a[ty][i] * s_b[i][tx]; } __syncthreads(); // You need to use __syncthreads() a few times // to synchronize the threads in a thread block. 
} // write out the result to output[row*input_size + col] // CHANGE if(row<input_size && col <input_size) output[row*input_size + col] = result + c[row*input_size + col] * beta; } int main(int argc, char **argv) { if(argc < 4) { cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n"; return 1; } const int input_size = stoi(argv[1]); const int filter_size = stoi(argv[2]); // used for maxpooling const float alpha = stof(argv[3]); const float beta = stof(argv[4]); const int maxpool_output_size = input_size/filter_size; // check input_size is power of 2 if(input_size == 0 && (input_size & (input_size-1))){ cout << "input_size must be power of 2\n"; return 1; } if(filter_size == 0){ cout << "filter_size cannot be 0\n"; return 1; } float maxpool_input[input_size*input_size]; float a[input_size*input_size]; float b[input_size*input_size]; float c[input_size*input_size]; // read input matrices ifstream input_in(MAXPOOL_INPUT_FILENAME); ifstream a_in(A_FILENAME); ifstream b_in(B_FILENAME); ifstream c_in(C_FILENAME); for (int i = 0; i < input_size*input_size; ++i) { input_in >> maxpool_input[i]; a_in >> a[i]; b_in >> b[i]; c_in >> c[i]; } // prints inputs for debugging. cout<<"filter size : "<<filter_size; cout<<"\n========== MAXPOOL_INPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<maxpool_input[i]<<" "; } cout<<"\nalpha : "<<alpha<<'\n'; cout<<"========== A ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<a[i]<<" "; } cout<<"\n========== B ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<b[i]<<" "; } cout<<"\nbeta : "<<beta<<'\n'; cout<<"========== C ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<c[i]<<" "; } cout<<'\n'; // set thread, block dimensions const dim3 block_size(TILE_WIDTH, TILE_WIDTH); const dim3 num_of_maxpool_blocks(maxpool_output_size/block_size.x+1, maxpool_output_size/block_size.y+1); const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1); // memory allocation for the device float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output; hipMalloc(&dev_mem_a, sizeof(float) * input_size * input_size); hipMalloc(&dev_mem_b, sizeof(float) * input_size * input_size); hipMalloc(&dev_mem_c, sizeof(float) * input_size * input_size); hipMalloc(&gemm_output, sizeof(float) * input_size * input_size); hipMalloc(&dev_mem_input, sizeof(float) * input_size * input_size); hipMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size); // copy variable to device memory hipMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); hipMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); hipMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); hipMemcpy(dev_mem_input, maxpool_input, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); // launch CUDA kernels // First launch gemm kernel hipLaunchKernelGGL(( gemm), dim3(num_of_blocks), dim3(block_size), 0, 0, dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr, "ERROR %s\n", hipGetErrorString(error)); return 1; } // Then run maxpooling hipLaunchKernelGGL(( maxpool), 
dim3(num_of_maxpool_blocks), dim3(block_size), 0, 0, dev_mem_input, maxpool_output, input_size, filter_size); hipDeviceSynchronize(); error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr, "ERROR %s\n", hipGetErrorString(error)); return 1; } // allocate output buf in main memory float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size); float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size); // copy results from device to host hipMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, hipMemcpyDeviceToHost); hipMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, hipMemcpyDeviceToHost); // prints the results cout<<"\n========== GEMM OUTPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<gemm_output_buf[i]<<" "; } cout<<"\n========== MAXPOOL OUTPUT ==========\n"; for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) { if(i%maxpool_output_size==0) cout<<"\n"; cout<<maxpool_output_buf[i]<<" "; } cout<<'\n'; hipFree(dev_mem_a); hipFree(dev_mem_b); hipFree(dev_mem_c); hipFree(gemm_output); hipFree(dev_mem_input); hipFree(maxpool_output); free(gemm_output_buf); free(maxpool_output_buf); return 0; }
ebd1cfa166bc61e486af2f2f8d5052978c02e671.cu
/* * Find BLANK and replace your own code. * And submit report why do you replace the blank that way. */ #include<stdlib.h> #include<iostream> #include<fstream> #include<vector> #include<string> #define TILE_WIDTH 32 /* set TILE_WIDTH 16 for the evaluation! */ #define MAXPOOL_INPUT_FILENAME "input.txt" #define A_FILENAME "a.txt" #define B_FILENAME "b.txt" #define C_FILENAME "c.txt" using namespace std; __global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) { // input : input_matrix address // output : output buffer address // input_size : width, height of input matrix // filter_size : filter_size of maxpolling // all input, output matrices are vectorized int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; // CHANGE float max = 0.0; for(int i = 0; i<3; i++){ for(int j = 0; j<3; j++){ int var = input[(filter_size * col) + (input_size * filter_size * row) + j + (input_size * i)]; if(var > max) max = var; } } output[col + row *(input_size/filter_size)] = max; // out of bound } __global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size){ // a, b, c : input matrix address // alpha, beta : input constant // output : output buffer address // input_size : width, height of input matrix // all input, output matrices are vectorized int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int row = by*blockDim.y + ty; int col = bx*blockDim.x + tx; //if(row>=input_size ||col>=input_size) { return; } // allocate 2D tiles in __shared__ memory __shared__ float s_a[TILE_WIDTH][TILE_WIDTH]; __shared__ float s_b[TILE_WIDTH][TILE_WIDTH]; float result = 0; // make sure you handle the case when the matrix sizes are not // multiple of TILE_WIDTH! // loop over the tiles of the input in phases int p = 0; for(p = 0; p <= input_size/TILE_WIDTH; ++p){ // CHANGE if(p*TILE_WIDTH + tx < input_size && row <input_size) s_a[ty][tx] = a[row * input_size + tx + p * TILE_WIDTH]; else s_a[ty][tx] = 0.0; if(p*TILE_WIDTH + ty < input_size && col <input_size) s_b[ty][tx] = b[col + input_size * ty + p * input_size * TILE_WIDTH]; else s_b[ty][tx] = 0.0; __syncthreads(); for(int i = 0; i < TILE_WIDTH; i++){ result += alpha * s_a[ty][i] * s_b[i][tx]; // result += alpha * s_a[ty][i] * s_b[i][tx]; } __syncthreads(); // You need to use __syncthreads() a few times // to synchronize the threads in a thread block. 
} // write out the result to output[row*input_size + col] // CHANGE if(row<input_size && col <input_size) output[row*input_size + col] = result + c[row*input_size + col] * beta; } int main(int argc, char **argv) { if(argc < 4) { cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n"; return 1; } const int input_size = stoi(argv[1]); const int filter_size = stoi(argv[2]); // used for maxpooling const float alpha = stof(argv[3]); const float beta = stof(argv[4]); const int maxpool_output_size = input_size/filter_size; // check input_size is power of 2 if(input_size == 0 && (input_size & (input_size-1))){ cout << "input_size must be power of 2\n"; return 1; } if(filter_size == 0){ cout << "filter_size cannot be 0\n"; return 1; } float maxpool_input[input_size*input_size]; float a[input_size*input_size]; float b[input_size*input_size]; float c[input_size*input_size]; // read input matrices ifstream input_in(MAXPOOL_INPUT_FILENAME); ifstream a_in(A_FILENAME); ifstream b_in(B_FILENAME); ifstream c_in(C_FILENAME); for (int i = 0; i < input_size*input_size; ++i) { input_in >> maxpool_input[i]; a_in >> a[i]; b_in >> b[i]; c_in >> c[i]; } // prints inputs for debugging. cout<<"filter size : "<<filter_size; cout<<"\n========== MAXPOOL_INPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<maxpool_input[i]<<" "; } cout<<"\nalpha : "<<alpha<<'\n'; cout<<"========== A ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<a[i]<<" "; } cout<<"\n========== B ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<b[i]<<" "; } cout<<"\nbeta : "<<beta<<'\n'; cout<<"========== C ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<c[i]<<" "; } cout<<'\n'; // set thread, block dimensions const dim3 block_size(TILE_WIDTH, TILE_WIDTH); const dim3 num_of_maxpool_blocks(maxpool_output_size/block_size.x+1, maxpool_output_size/block_size.y+1); const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1); // memory allocation for the device float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output; cudaMalloc(&dev_mem_a, sizeof(float) * input_size * input_size); cudaMalloc(&dev_mem_b, sizeof(float) * input_size * input_size); cudaMalloc(&dev_mem_c, sizeof(float) * input_size * input_size); cudaMalloc(&gemm_output, sizeof(float) * input_size * input_size); cudaMalloc(&dev_mem_input, sizeof(float) * input_size * input_size); cudaMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size); // copy variable to device memory cudaMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_mem_input, maxpool_input, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice); // launch CUDA kernels // First launch gemm kernel gemm<<<num_of_blocks, block_size>>>(dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error)); return 1; } // Then run maxpooling maxpool<<<num_of_maxpool_blocks, 
block_size>>>(dev_mem_input, maxpool_output, input_size, filter_size); cudaDeviceSynchronize(); error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error)); return 1; } // allocate output buf in main memory float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size); float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size); // copy results from device to host cudaMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, cudaMemcpyDeviceToHost); cudaMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, cudaMemcpyDeviceToHost); // prints the results cout<<"\n========== GEMM OUTPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<gemm_output_buf[i]<<" "; } cout<<"\n========== MAXPOOL OUTPUT ==========\n"; for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) { if(i%maxpool_output_size==0) cout<<"\n"; cout<<maxpool_output_buf[i]<<" "; } cout<<'\n'; cudaFree(dev_mem_a); cudaFree(dev_mem_b); cudaFree(dev_mem_c); cudaFree(gemm_output); cudaFree(dev_mem_input); cudaFree(maxpool_output); free(gemm_output_buf); free(maxpool_output_buf); return 0; }
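The gemm/maxpool pair also illustrates the launch-then-check idiom, where cudaGetLastError and cudaGetErrorString become hipGetLastError and hipGetErrorString. A minimal sketch of that pattern as a reusable helper follows; the dummy kernel is a hypothetical stand-in.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void dummy(float *out) { out[threadIdx.x] = (float)threadIdx.x; }

/* launch-then-check pattern used after each kernel in the file above */
static int check_last_kernel(const char *what)
{
    hipDeviceSynchronize();                        /* cudaDeviceSynchronize in the .cu file */
    hipError_t error = hipGetLastError();          /* cudaGetLastError                      */
    if (error != hipSuccess) {
        /* cudaGetErrorString in the .cu file */
        fprintf(stderr, "ERROR after %s: %s\n", what, hipGetErrorString(error));
        return 1;
    }
    return 0;
}

int main()
{
    float *buf;
    hipMalloc(&buf, 32 * sizeof(float));
    hipLaunchKernelGGL(dummy, dim3(1), dim3(32), 0, 0, buf);
    int rc = check_last_kernel("dummy");
    hipFree(buf);
    return rc;
}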
273716bfac2e9ea79f09bde6b668fff5fdfc99c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* function for projecting lidar points * */ #include "../common.h" __global__ void CameraTransformKernel(const float* const tform, const float* const cam, const size_t imWidth, const size_t imHeight, const float* const xIn, const float* const yIn, const float* const zIn, const size_t numPoints, float* const xOut, float* const yOut, bool* const valid){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } //transform points float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12]; float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13]; float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14]; bool v = true; //panoramic camera model y = (y/sqrt(z*z + x*x)); x = atan2(x,z); //apply projective camera matrix x = cam[0]*x + cam[3]*y + cam[6]*z + cam[9]; y = cam[1]*x + cam[4]*y + cam[7]*z + cam[10]; z = cam[2]*x + cam[5]*y + cam[8]*z + cam[11]; if((x < 0) || (y < 0) || (x >= imWidth) || (y >= imHeight)){ v = false; } //output points xOut[i] = x; yOut[i] = y; valid[i] = v; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. mxInitGPU(); //read data mxGPUArray const * tformMat = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * camMat = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const * pointsMat = mxGPUCreateFromMxArray(prhs[2]); size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1]; size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0]; size_t numPoints = mxGPUGetDimensions(pointsMat)[0]; //get input pointers float* tformPtr = (float*)(mxGPUGetDataReadOnly(tformMat)); float* camPtr = (float*)(mxGPUGetDataReadOnly(camMat)); float* xInPtr = (float*)(mxGPUGetDataReadOnly(pointsMat)); float* yInPtr = &(xInPtr[numPoints]); float* zInPtr = &(yInPtr[numPoints]); //create output mwSize outSize[] = {numPoints,2}; mxGPUArray* outMat = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[1] = mxGPUCreateMxArrayOnGPU(outMat); outSize[1] = 1; mxGPUArray* validMat = mxGPUCreateGPUArray(2, outSize, mxLOGICAL_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[0] = mxGPUCreateMxArrayOnGPU(validMat); float* xOutPtr = (float*)(mxGPUGetData(outMat)); float* yOutPtr = &(xOutPtr[numPoints]); bool* validPtr = (bool*)(mxGPUGetData(validMat)); //run and get ouputs hipLaunchKernelGGL(( CameraTransformKernel), dim3(gridSize(numPoints)), dim3(BLOCK_SIZE), 0, 0, tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, numPoints, xOutPtr, yOutPtr,validPtr); CudaCheckError(); //destroy reference structures mxGPUDestroyGPUArray(tformMat); mxGPUDestroyGPUArray(camMat); mxGPUDestroyGPUArray(pointsMat); mxGPUDestroyGPUArray(outMat); mxGPUDestroyGPUArray(validMat); }
273716bfac2e9ea79f09bde6b668fff5fdfc99c2.cu
/* function for projecting lidar points * */ #include "../common.h" __global__ void CameraTransformKernel(const float* const tform, const float* const cam, const size_t imWidth, const size_t imHeight, const float* const xIn, const float* const yIn, const float* const zIn, const size_t numPoints, float* const xOut, float* const yOut, bool* const valid){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } //transform points float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12]; float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13]; float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14]; bool v = true; //panoramic camera model y = (y/sqrt(z*z + x*x)); x = atan2(x,z); //apply projective camera matrix x = cam[0]*x + cam[3]*y + cam[6]*z + cam[9]; y = cam[1]*x + cam[4]*y + cam[7]*z + cam[10]; z = cam[2]*x + cam[5]*y + cam[8]*z + cam[11]; if((x < 0) || (y < 0) || (x >= imWidth) || (y >= imHeight)){ v = false; } //output points xOut[i] = x; yOut[i] = y; valid[i] = v; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. mxInitGPU(); //read data mxGPUArray const * tformMat = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * camMat = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const * pointsMat = mxGPUCreateFromMxArray(prhs[2]); size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1]; size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0]; size_t numPoints = mxGPUGetDimensions(pointsMat)[0]; //get input pointers float* tformPtr = (float*)(mxGPUGetDataReadOnly(tformMat)); float* camPtr = (float*)(mxGPUGetDataReadOnly(camMat)); float* xInPtr = (float*)(mxGPUGetDataReadOnly(pointsMat)); float* yInPtr = &(xInPtr[numPoints]); float* zInPtr = &(yInPtr[numPoints]); //create output mwSize outSize[] = {numPoints,2}; mxGPUArray* outMat = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[1] = mxGPUCreateMxArrayOnGPU(outMat); outSize[1] = 1; mxGPUArray* validMat = mxGPUCreateGPUArray(2, outSize, mxLOGICAL_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); plhs[0] = mxGPUCreateMxArrayOnGPU(validMat); float* xOutPtr = (float*)(mxGPUGetData(outMat)); float* yOutPtr = &(xOutPtr[numPoints]); bool* validPtr = (bool*)(mxGPUGetData(validMat)); //run and get ouputs CameraTransformKernel<<<gridSize(numPoints), BLOCK_SIZE>>>(tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, numPoints, xOutPtr, yOutPtr,validPtr); CudaCheckError(); //destroy reference structures mxGPUDestroyGPUArray(tformMat); mxGPUDestroyGPUArray(camMat); mxGPUDestroyGPUArray(pointsMat); mxGPUDestroyGPUArray(outMat); mxGPUDestroyGPUArray(validMat); }
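The launch above depends on gridSize() and BLOCK_SIZE from ../common.h, which is not part of this dump. A plausible reconstruction consistent with the usage (one thread per point, with the kernel returning early when i >= numPoints) follows; the value 1024 is an assumption, not the definition from common.h.

/* hypothetical reconstruction of helpers from ../common.h (not included in this dump) */
#define BLOCK_SIZE 1024                     /* assumed value; the real one may differ */

inline unsigned int gridSize(size_t numPoints)
{
    /* enough blocks so that gridSize(n) * BLOCK_SIZE >= n; the kernel itself   */
    /* already returns early when i >= numPoints, so over-provisioning is safe. */
    return (unsigned int)((numPoints + BLOCK_SIZE - 1) / BLOCK_SIZE);
}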
9abb072738e15a75aeaa5c323cb40bb276f62c41.hip
// !!! This is a file automatically generated by hipify!!! #include "IO.h" int pw_old; const std::string constHeader("Constants.h"); std::string path; std::string fool_num_plot; std::ofstream *ofs; template <int max> struct getHomingFood{ int operator()(int i){ static const int n = max-1; static thrust::plus<int> binary_op; if(n==i){ return thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, getHomingWithFoodNum<n>(), 0, binary_op);; } else{ return getHomingFood<n>()(i); } } }; template <> struct getHomingFood<0>{ int operator()(int i){ return -1; } }; template <AntCharacter ch,int max> struct getHomingTypeAndFood{ int operator()(int i){ static const int n = max-1; static thrust::plus<int> binary_op; if(n==i){ return thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, getHomingWithTypeAndFoodNum<ch,n>(), 0, binary_op);; } else{ return getHomingTypeAndFood<ch,n>()(i); } } }; template <AntCharacter ch> struct getHomingTypeAndFood<ch,0>{ int operator()(int i){ return -1; } }; struct getPheroAroundFood{ __device__ double operator()(const Food food) const{ int i = food.i; int j = food.j; double sum = 0.0; sum += cells_d[i][j].phero; Cell *c=NULL; for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){ c = getCell(cells_d,i,j,dir); sum += c->phero; } return sum/7.0; } }; template <class T> std::string toString(T t){ static std::ostringstream ss; ss << (T)t; std::string str(ss.str()); ss.str(""); ss.clear(std::stringstream::goodbit); return str; } void IOInit(){ struct stat st; pw_old = -1; std::string nmax = toString(MACRO_NMAX); std::string max = toString(MACRO_MAX); std::string initDir(nmax+"ants_"+max+"x"+max+"cells"); if(stat(initDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(initDir.c_str()); #else mkdir(initDir.c_str(), 0775); #endif } std::string fnum = toString(MACRO_NUM_FOODS); std::string fNumDir(initDir+"/"+fnum+"foodnum"); if(stat(fNumDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(fNumDir.c_str()); #else mkdir(fNumDir.c_str(), 0775); #endif } std::string fsource = toString(MACRO_FOODSOURCE); std::string fdist = toString(MACRO_FOOD_DIST); std::string fCondDir(fNumDir+"/"+fsource+"initfvol_"+fdist+"fdist"); if(stat(fCondDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(fCondDir.c_str()); #else mkdir(fCondDir.c_str(), 0775); #endif } std::string step = toString(MACRO_MAX_STEP); std::string angle = toString(MACRO_FOOD_ANGLE); std::string stepAngleDir(fCondDir+"/"+step+"steps_"+angle+"deg"); if(stat(stepAngleDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(stepAngleDir.c_str()); #else mkdir(stepAngleDir.c_str(), 0775); #endif } path = std::string(stepAngleDir+"/"); fool_num_plot = std::string(path+step+"steps_"+angle+"deg.dat"); ofs = new std::ofstream(fool_num_plot.c_str()); std::ifstream constifs(constHeader.c_str()); std::ofstream constofs((path+constHeader).c_str()); constofs << constifs.rdbuf() << std::flush; } void IOEffWrite(int pw, int n, double sum){ if(pw_old<pw){ pw_old = pw; (*ofs) << std::endl; } (*ofs) << pw << " " << n << " " << (sum/(MACRO_MAX_STEP))/(MACRO_MAX_TIME-1000) << std::endl; } void IOCellWrite(int pw, int n){ std::string pwstr = toString(pw); std::string nstr = toString(n); std::string anglestr = toString(MACRO_FOOD_ANGLE); std::string celldata(path+"cell_"+anglestr+"deg_10e-"+pwstr+"_"+nstr+"normal"+".dat"); std::ofstream cellfs(celldata.c_str()); hipMemcpyFromSymbol(cells,cells_d,MACRO_MAX*MACRO_MAX*sizeof(Cell),0); for(int i=0; i<MACRO_MAX; i++){ for(int j=0; j<MACRO_MAX; j++){ cellfs << cells[j][i].cart.x << " " << cells[j][i].cart.y << " " << 
cells[j][i].phero << std::endl; } cellfs << std::endl; } } void IOEffPoll(int pw, int n, int sample, int t){ static getHomingWithType<NORMAL_CH> normalOp; static getHomingWithType<FOOL_CH> ahoOp; static thrust::plus<int> binary_op; static int homingFoods[MACRO_NUM_FOODS]; static int foolHomingFoods[MACRO_NUM_FOODS]; static int normalHomingFoods[MACRO_NUM_FOODS]; static getHomingFood<MACRO_NUM_FOODS> homingFoodFunctor; static getHomingTypeAndFood<FOOL_CH,MACRO_NUM_FOODS> foolHomingFoodFunctor; static getHomingTypeAndFood<NORMAL_CH,MACRO_NUM_FOODS> normalHomingFoodFunctor; static thrust::host_vector<double> phero_h(MACRO_NUM_FOODS); static thrust::device_vector<double> phero_d(MACRO_NUM_FOODS); std::string stepstr = toString(MACRO_MAX_STEP); std::string anglestr = toString(MACRO_FOOD_ANGLE); std::string pwstr = toString(pw); std::string nstr = toString(n); std::string samplestr = toString(sample); std::string celldata(path+"food_"+anglestr+"deg_10e-"+pwstr+"_"+nstr+"normal"+"_sampleNo"+samplestr+"_of_"+stepstr+".dat"); std::ofstream pollfs(celldata.c_str(),std::ios::out | std::ios::app); int nor = thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, normalOp, 0, binary_op); int aho = thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, ahoOp, 0, binary_op); thrust::transform(foods_d_ptr, foods_d_ptr+MACRO_NUM_FOODS, phero_d.begin(), getPheroAroundFood()); thrust::copy(phero_d.begin(), phero_d.end(), phero_h.begin()); pollfs << t << " "; for (int i=0; i<MACRO_NUM_FOODS; i++){ //homingFoods[i] = homingFoodFunctor(i); foolHomingFoods[i] = foolHomingFoodFunctor(i); normalHomingFoods[i] = normalHomingFoodFunctor(i); homingFoods[i]=foolHomingFoods[i]+normalHomingFoods[i]; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << homingFoods[i] << " "; } pollfs << nor << " " << aho << " " << nor/(double)n << " " << aho/(double)(MACRO_NMAX-n) << " " << (nor+aho)<< " "; for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << normalHomingFoods[i] << " "; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << foolHomingFoods[i] << " "; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << normalHomingFoods[i]/(double)n << " "; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << foolHomingFoods[i]/(double)(MACRO_NMAX-n) << " "; } pollfs << (nor+aho) << " "; for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << phero_h[i] << " "; } pollfs << std::endl; }
9abb072738e15a75aeaa5c323cb40bb276f62c41.cu
#include "IO.h" int pw_old; const std::string constHeader("Constants.h"); std::string path; std::string fool_num_plot; std::ofstream *ofs; template <int max> struct getHomingFood{ int operator()(int i){ static const int n = max-1; static thrust::plus<int> binary_op; if(n==i){ return thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, getHomingWithFoodNum<n>(), 0, binary_op);; } else{ return getHomingFood<n>()(i); } } }; template <> struct getHomingFood<0>{ int operator()(int i){ return -1; } }; template <AntCharacter ch,int max> struct getHomingTypeAndFood{ int operator()(int i){ static const int n = max-1; static thrust::plus<int> binary_op; if(n==i){ return thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, getHomingWithTypeAndFoodNum<ch,n>(), 0, binary_op);; } else{ return getHomingTypeAndFood<ch,n>()(i); } } }; template <AntCharacter ch> struct getHomingTypeAndFood<ch,0>{ int operator()(int i){ return -1; } }; struct getPheroAroundFood{ __device__ double operator()(const Food food) const{ int i = food.i; int j = food.j; double sum = 0.0; sum += cells_d[i][j].phero; Cell *c=NULL; for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){ c = getCell(cells_d,i,j,dir); sum += c->phero; } return sum/7.0; } }; template <class T> std::string toString(T t){ static std::ostringstream ss; ss << (T)t; std::string str(ss.str()); ss.str(""); ss.clear(std::stringstream::goodbit); return str; } void IOInit(){ struct stat st; pw_old = -1; std::string nmax = toString(MACRO_NMAX); std::string max = toString(MACRO_MAX); std::string initDir(nmax+"ants_"+max+"x"+max+"cells"); if(stat(initDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(initDir.c_str()); #else mkdir(initDir.c_str(), 0775); #endif } std::string fnum = toString(MACRO_NUM_FOODS); std::string fNumDir(initDir+"/"+fnum+"foodnum"); if(stat(fNumDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(fNumDir.c_str()); #else mkdir(fNumDir.c_str(), 0775); #endif } std::string fsource = toString(MACRO_FOODSOURCE); std::string fdist = toString(MACRO_FOOD_DIST); std::string fCondDir(fNumDir+"/"+fsource+"initfvol_"+fdist+"fdist"); if(stat(fCondDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(fCondDir.c_str()); #else mkdir(fCondDir.c_str(), 0775); #endif } std::string step = toString(MACRO_MAX_STEP); std::string angle = toString(MACRO_FOOD_ANGLE); std::string stepAngleDir(fCondDir+"/"+step+"steps_"+angle+"deg"); if(stat(stepAngleDir.c_str(), &st) != 0){ #ifdef _WIN32 _mkdir(stepAngleDir.c_str()); #else mkdir(stepAngleDir.c_str(), 0775); #endif } path = std::string(stepAngleDir+"/"); fool_num_plot = std::string(path+step+"steps_"+angle+"deg.dat"); ofs = new std::ofstream(fool_num_plot.c_str()); std::ifstream constifs(constHeader.c_str()); std::ofstream constofs((path+constHeader).c_str()); constofs << constifs.rdbuf() << std::flush; } void IOEffWrite(int pw, int n, double sum){ if(pw_old<pw){ pw_old = pw; (*ofs) << std::endl; } (*ofs) << pw << " " << n << " " << (sum/(MACRO_MAX_STEP))/(MACRO_MAX_TIME-1000) << std::endl; } void IOCellWrite(int pw, int n){ std::string pwstr = toString(pw); std::string nstr = toString(n); std::string anglestr = toString(MACRO_FOOD_ANGLE); std::string celldata(path+"cell_"+anglestr+"deg_10e-"+pwstr+"_"+nstr+"normal"+".dat"); std::ofstream cellfs(celldata.c_str()); cudaMemcpyFromSymbol(cells,cells_d,MACRO_MAX*MACRO_MAX*sizeof(Cell),0); for(int i=0; i<MACRO_MAX; i++){ for(int j=0; j<MACRO_MAX; j++){ cellfs << cells[j][i].cart.x << " " << cells[j][i].cart.y << " " << cells[j][i].phero << std::endl; } cellfs << std::endl; } } void 
IOEffPoll(int pw, int n, int sample, int t){ static getHomingWithType<NORMAL_CH> normalOp; static getHomingWithType<FOOL_CH> ahoOp; static thrust::plus<int> binary_op; static int homingFoods[MACRO_NUM_FOODS]; static int foolHomingFoods[MACRO_NUM_FOODS]; static int normalHomingFoods[MACRO_NUM_FOODS]; static getHomingFood<MACRO_NUM_FOODS> homingFoodFunctor; static getHomingTypeAndFood<FOOL_CH,MACRO_NUM_FOODS> foolHomingFoodFunctor; static getHomingTypeAndFood<NORMAL_CH,MACRO_NUM_FOODS> normalHomingFoodFunctor; static thrust::host_vector<double> phero_h(MACRO_NUM_FOODS); static thrust::device_vector<double> phero_d(MACRO_NUM_FOODS); std::string stepstr = toString(MACRO_MAX_STEP); std::string anglestr = toString(MACRO_FOOD_ANGLE); std::string pwstr = toString(pw); std::string nstr = toString(n); std::string samplestr = toString(sample); std::string celldata(path+"food_"+anglestr+"deg_10e-"+pwstr+"_"+nstr+"normal"+"_sampleNo"+samplestr+"_of_"+stepstr+".dat"); std::ofstream pollfs(celldata.c_str(),std::ios::out | std::ios::app); int nor = thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, normalOp, 0, binary_op); int aho = thrust::transform_reduce(ants_d_ptr, ants_d_ptr+MACRO_NMAX, ahoOp, 0, binary_op); thrust::transform(foods_d_ptr, foods_d_ptr+MACRO_NUM_FOODS, phero_d.begin(), getPheroAroundFood()); thrust::copy(phero_d.begin(), phero_d.end(), phero_h.begin()); pollfs << t << " "; for (int i=0; i<MACRO_NUM_FOODS; i++){ //homingFoods[i] = homingFoodFunctor(i); foolHomingFoods[i] = foolHomingFoodFunctor(i); normalHomingFoods[i] = normalHomingFoodFunctor(i); homingFoods[i]=foolHomingFoods[i]+normalHomingFoods[i]; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << homingFoods[i] << " "; } pollfs << nor << " " << aho << " " << nor/(double)n << " " << aho/(double)(MACRO_NMAX-n) << " " << (nor+aho)<< " "; for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << normalHomingFoods[i] << " "; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << foolHomingFoods[i] << " "; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << normalHomingFoods[i]/(double)n << " "; } for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << foolHomingFoods[i]/(double)(MACRO_NMAX-n) << " "; } pollfs << (nor+aho) << " "; for (int i=0; i<MACRO_NUM_FOODS; i++){ pollfs << phero_h[i] << " "; } pollfs << std::endl; }
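IOCellWrite above copies the cells_d __device__ symbol back to the host, with cudaMemcpyFromSymbol renamed to hipMemcpyFromSymbol. A minimal self-contained sketch of that pattern follows; the counts_d symbol and sizes are hypothetical.

#include <hip/hip_runtime.h>
#include <cstdio>

__device__ int counts_d[4];                 /* device-side symbol, like cells_d above */

__global__ void bump(int base)
{
    counts_d[threadIdx.x] = base + (int)threadIdx.x;
}

int main()
{
    int counts[4] = {0, 0, 0, 0};
    hipLaunchKernelGGL(bump, dim3(1), dim3(4), 0, 0, 10);
    hipDeviceSynchronize();
    /* .cu form: cudaMemcpyFromSymbol(counts, counts_d, sizeof(counts), 0);           */
    /* the default copy direction is device-to-host in both APIs; when targeting the  */
    /* CUDA backend of HIP, wrapping the symbol as HIP_SYMBOL(counts_d) may be needed */
    hipMemcpyFromSymbol(counts, counts_d, sizeof(counts), 0, hipMemcpyDeviceToHost);
    printf("%d %d %d %d\n", counts[0], counts[1], counts[2], counts[3]);
    return 0;
}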
68d366e8448584a6b6ea0f8bfc1d3bc1bc2dea8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cstdio> #include<cstdlib> #include<iostream> #define DFL_LEN 32 #define MAX_THREADS_PER_BLOCK 1024 //supported by hardware, run ./deviceQuery to determine //cuda error checking #define check_error(ans) {cudaCheckError((ans),__FILE__,__LINE__);} inline void cudaCheckError(hipError_t e,const char *file,int line,bool abort = true){ if(e != hipSuccess){ fprintf(stderr,"GPUassert: %s\nFile: %s\nLine: %d\n",hipGetErrorString(e),file,line); if(abort) exit(e); } } //end of error checking typedef long int g_type; //struct declarations and global variables begin here struct vertex{ g_type number; g_type start; int n; }; struct map{ g_type node; g_type index; }; struct entry{ g_type edge; double val; }; g_type *edges; g_type edges_length; g_type edges_size; g_type edges_itr; struct vertex *vertex_list; g_type vertex_length; g_type vertex_size; g_type vertex_itr; struct entry *transitions; struct map *node_map; double *ranks; double *result; //end of struct and global definitions //start of interface int init_edges(){ if(edges != NULL) return 0; edges = (g_type *)malloc(DFL_LEN * sizeof(g_type)); if(edges == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } edges_size = DFL_LEN; edges_length = 0; edges_itr = 0; return 1; } void delete_edges(){ edges_length = 0; edges_size = DFL_LEN; edges_itr = 0; if(edges != NULL) free(edges); } int add_edge(int edge){ if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } if(edges_length == edges_size){ edges_size *= 2; edges = (g_type *)realloc(edges,edges_size * sizeof(g_type)); if(edges == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } } edges[edges_length] = edge; edges_length++; return 1; } int get_edge(g_type *e){ if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } if(e == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } g_type val = edges[edges_itr]; edges_itr++; if(edges_itr >= edges_size){ edges_itr = edges_itr % edges_size; } *e = val; return 1; } void reset_edge(){ edges_itr = 0; } void move_edge(g_type index){ edges_itr = index; } int init_vertices(){ if(vertex_list != NULL) return 0; vertex_list = (struct vertex *)malloc(DFL_LEN * sizeof(struct vertex)); if(vertex_list == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } vertex_length = 0; vertex_size = DFL_LEN; vertex_itr = 0; return 1; } void delete_vertices(){ vertex_itr = 0; vertex_length = 0; vertex_size = 0; if(vertex_list != NULL) free(vertex_list); } int add_vertex(struct vertex v){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } if(vertex_length == vertex_size){ vertex_size *= 2; vertex_list = (struct vertex *)realloc(vertex_list,vertex_size * sizeof(struct vertex)); if(vertex_list == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } } vertex_list[vertex_length].number = v.number; vertex_list[vertex_length].n = v.n; vertex_list[vertex_length].start = v.start; g_type temp = vertex_length; vertex_length++; return temp; } int get_vertex(struct vertex *v){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } v->number = vertex_list[vertex_itr].number; v->start = 
vertex_list[vertex_itr].start; v->n = vertex_list[vertex_itr].n; vertex_itr++; if(vertex_itr >= vertex_size){ vertex_itr = vertex_itr % vertex_size; } return 1; } void reset_vertex(){ vertex_itr = 0; } void move_vertex(g_type index){ vertex_itr = index; } void build_graph(FILE *fp){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(fp == NULL){ fprintf(stderr,"File pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } g_type from,to; int seen = 0; g_type cur = -1; while(fscanf(fp,"%ld %ld",&from,&to) != -1){ if(from == vertex_list[cur].number && vertex_length != 0){ seen = 1; } else{ seen = 0; } if(!seen){ struct vertex temp; temp.number = from; temp.start = edges_length; temp.n = 0; cur = add_vertex(temp); } add_edge(to); vertex_list[cur].n++; } } void create_map(){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(node_map == NULL){ node_map = (struct map *)malloc(vertex_length * sizeof(struct map)); if(node_map == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(int i=0;i<vertex_length;i++){ node_map[i].node = vertex_list[i].number; node_map[i].index = i; } } g_type search_map(g_type node){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return -1; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return -1; } if(node_map == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return -1; } for(int i=0;i<vertex_length;i++){ if(node_map[i].node == node) return node_map[i].index; } return -1; } __device__ g_type search_dmap(struct map *d_map,g_type *d_vlength,g_type node){ if(d_map == NULL){ return -1; } g_type len = *d_vlength; for(g_type i=0;i<len;i++){ if(d_map[i].node == node) return d_map[i].index; } return -1; } void delete_map(){ if(node_map != NULL) free(node_map); } void init_ranks(){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(ranks == NULL){ ranks = (double *)malloc(vertex_length * sizeof(double)); if(ranks == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(int i=0;i<vertex_length;i++){ ranks[i] = 0.25; } } void delete_ranks(){ if(ranks != NULL) free(ranks); } void init_transitions(){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(transitions == NULL){ transitions = (struct entry *)malloc(edges_length * sizeof(struct entry)); if(transitions == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(g_type i=0;i<vertex_length;i++){ g_type start = vertex_list[i].start; g_type j = start; int n = vertex_list[i].n; while(j < start + n){ transitions[j].edge = edges[j]; transitions[j].val = 1.0 / vertex_list[i].n; j++; } } } void delete_transitions(){ if(transitions != NULL) free(transitions); } void init_result(){ if(vertex_list == NULL){ 
fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(result == NULL){ result = (double *)malloc(vertex_length * sizeof(double)); if(result == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(int i=0;i<vertex_length;i++){ result[i] = 0.0; } } void delete_result(){ if(result != NULL) free(result); } int pagerank(){ for(int i=0;i<vertex_length;i++){ for(int j = vertex_list[i].start; j < vertex_list[i].start + vertex_list[i].n; j++){ double temp = transitions[j].val * ranks[i]; g_type index = search_map(transitions[j].edge); result[index] += temp; } } return 1; } void update_ranks(){ for(int i=0;i<vertex_length;i++){ ranks[i] = result[i]; result[i] = 0.0; } } //end of interface //CUDA kernels __global__ void multiply_kernel(struct vertex *d_vertices,struct entry *d_transitions,struct map *d_map,double *d_ranks,double *d_tempranks,g_type *d_vlength){ int threadId = blockDim.x * blockIdx.x + threadIdx.x; double b = d_ranks[threadId]; g_type len = *d_vlength; if(threadId < len){ for(g_type i = d_vertices[threadId].start;i < d_vertices[threadId].start + d_vertices[threadId].n;i++){ double a = d_transitions[i].val; int index = search_dmap(d_map,d_vlength,d_transitions[i].edge); double res = a * b; double temp = d_tempranks[index]; __syncthreads(); temp += res; d_tempranks[index] = temp; __syncthreads(); } } } //deprecated __global__ void add_kernel(struct vertex *d_vertices,struct entry *d_transitions,double *d_res,struct map *d_map,double *d_tempranks,g_type *d_vlength){ int threadId = blockDim.x * blockIdx.x + threadIdx.x; g_type len = *d_vlength; if(threadId < len){ for(g_type i = d_vertices[threadId].start;i < d_vertices[threadId].start + d_vertices[threadId].n;i++){ int index = search_dmap(d_map,d_vlength,d_transitions[i].edge); double val = d_res[i]; double temp = d_tempranks[index]; __syncthreads(); temp += val; d_tempranks[index] = temp; __syncthreads(); } } } __global__ void update_kernel(double *d_tempranks,double *d_ranks,g_type *d_vlength){ int threadId = blockDim.x * blockIdx.x + threadIdx.x; g_type len = *d_vlength; if(threadId < len){ d_ranks[threadId] = d_tempranks[threadId]; } } //end of CUDA kernels //main program begins here int main(int argc,char **argv){ if(argc != 4){ fprintf(stderr,"Correct usage: %s <pathToGraph> <numIterations> <serial = 0/parallel = 1>\n",argv[0]); exit(1); } FILE *fp = fopen(argv[1],"r"); const int iterations = atoi(argv[2]); const int mode = atoi(argv[3]); init_vertices(); init_edges(); build_graph(fp); create_map(); init_ranks(); init_transitions(); if(mode == 1){ //initializing device memory g_type *d_elength; check_error(hipMalloc((void **)&d_elength,sizeof(g_type))); check_error(hipMemcpy(d_elength,&edges_length,sizeof(g_type),hipMemcpyHostToDevice)); g_type *d_vlength; check_error(hipMalloc((void **)&d_vlength,sizeof(g_type))); check_error(hipMemcpy(d_vlength,&vertex_length,sizeof(g_type),hipMemcpyHostToDevice)); struct vertex *d_vertices; check_error(hipMalloc((void **)&d_vertices,vertex_length * sizeof(struct vertex))); check_error(hipMemcpy(d_vertices,vertex_list,vertex_length * sizeof(struct vertex),hipMemcpyHostToDevice)); struct entry *d_transitions; check_error(hipMalloc((void **)&d_transitions,edges_length * sizeof(struct entry))); check_error(hipMemcpy(d_transitions,transitions,edges_length * sizeof(struct entry),hipMemcpyHostToDevice)); struct map *d_map; 
check_error(hipMalloc((void **)&d_map,vertex_length * sizeof(struct map))); check_error(hipMemcpy(d_map,node_map,vertex_length * sizeof(struct map),hipMemcpyHostToDevice)); double *d_ranks; check_error(hipMalloc((void **)&d_ranks,vertex_length * sizeof(double))); check_error(hipMemcpy(d_ranks,ranks,vertex_length * sizeof(double),hipMemcpyHostToDevice)); double *d_res; check_error(hipMalloc((void **)&d_res,edges_length * sizeof(double))); double *d_tempranks; check_error(hipMalloc((void **)&d_tempranks,vertex_length * sizeof(double))); //pagerank iterations begin here: Power method int blocks = 1; int threads = vertex_length; if(vertex_length > MAX_THREADS_PER_BLOCK){ blocks = (int)ceil(vertex_length / (double)MAX_THREADS_PER_BLOCK); threads = MAX_THREADS_PER_BLOCK; } int counter = 0; clock_t begin = clock(); while(counter < iterations){ check_error(hipMemset(d_res,0.0,edges_length * sizeof(double))); check_error(hipMemset(d_tempranks,0.0,vertex_length * sizeof(double))); hipLaunchKernelGGL(( multiply_kernel), dim3(blocks),dim3(threads), 0, 0, d_vertices,d_transitions,d_map,d_ranks,d_tempranks,d_vlength); hipDeviceSynchronize(); //add_kernel<<<blocks,threads>>>(d_vertices,d_transitions,d_res,d_map,d_tempranks,d_vlength); //hipDeviceSynchronize(); hipLaunchKernelGGL(( update_kernel), dim3(blocks),dim3(threads), 0, 0, d_tempranks,d_ranks,d_vlength); hipDeviceSynchronize(); counter++; } clock_t end = clock(); //end of pagerank iterations double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; double *res; res = (double *)malloc(vertex_length * sizeof(double)); check_error(hipMemcpy(res,d_ranks,vertex_length * sizeof(double),hipMemcpyDeviceToHost)); for(int i = 0;i<vertex_length;i++){ printf("%lf\n",res[i]); } free(res); printf("%lf s\n",time_spent); check_error(hipFree(d_elength)); check_error(hipFree(d_vlength)); check_error(hipFree(d_vertices)); check_error(hipFree(d_transitions)); check_error(hipFree(d_map)); check_error(hipFree(d_ranks)); check_error(hipFree(d_res)); check_error(hipFree(d_tempranks)); } else{ clock_t begin = clock(); init_result(); int counter = 0; while(counter < iterations){ if(!pagerank()){ fprintf(stderr,"Pagerank failed in iteration: %d\n",counter); break; } update_ranks(); counter++; } clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; for(int i = 0;i<vertex_length;i++){ printf("%lf\n",ranks[i]); } printf("%lf s\n",time_spent); } //end of device memory initialization delete_edges(); delete_vertices(); delete_ranks(); delete_transitions(); delete_map(); delete_result(); return 0; }
68d366e8448584a6b6ea0f8bfc1d3bc1bc2dea8c.cu
#include<cstdio> #include<cstdlib> #include<iostream> #define DFL_LEN 32 #define MAX_THREADS_PER_BLOCK 1024 //supported by hardware, run ./deviceQuery to determine //cuda error checking #define check_error(ans) {cudaCheckError((ans),__FILE__,__LINE__);} inline void cudaCheckError(cudaError_t e,const char *file,int line,bool abort = true){ if(e != cudaSuccess){ fprintf(stderr,"GPUassert: %s\nFile: %s\nLine: %d\n",cudaGetErrorString(e),file,line); if(abort) exit(e); } } //end of error checking typedef long int g_type; //struct declarations and global variables begin here struct vertex{ g_type number; g_type start; int n; }; struct map{ g_type node; g_type index; }; struct entry{ g_type edge; double val; }; g_type *edges; g_type edges_length; g_type edges_size; g_type edges_itr; struct vertex *vertex_list; g_type vertex_length; g_type vertex_size; g_type vertex_itr; struct entry *transitions; struct map *node_map; double *ranks; double *result; //end of struct and global definitions //start of interface int init_edges(){ if(edges != NULL) return 0; edges = (g_type *)malloc(DFL_LEN * sizeof(g_type)); if(edges == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } edges_size = DFL_LEN; edges_length = 0; edges_itr = 0; return 1; } void delete_edges(){ edges_length = 0; edges_size = DFL_LEN; edges_itr = 0; if(edges != NULL) free(edges); } int add_edge(int edge){ if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } if(edges_length == edges_size){ edges_size *= 2; edges = (g_type *)realloc(edges,edges_size * sizeof(g_type)); if(edges == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } } edges[edges_length] = edge; edges_length++; return 1; } int get_edge(g_type *e){ if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } if(e == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } g_type val = edges[edges_itr]; edges_itr++; if(edges_itr >= edges_size){ edges_itr = edges_itr % edges_size; } *e = val; return 1; } void reset_edge(){ edges_itr = 0; } void move_edge(g_type index){ edges_itr = index; } int init_vertices(){ if(vertex_list != NULL) return 0; vertex_list = (struct vertex *)malloc(DFL_LEN * sizeof(struct vertex)); if(vertex_list == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } vertex_length = 0; vertex_size = DFL_LEN; vertex_itr = 0; return 1; } void delete_vertices(){ vertex_itr = 0; vertex_length = 0; vertex_size = 0; if(vertex_list != NULL) free(vertex_list); } int add_vertex(struct vertex v){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } if(vertex_length == vertex_size){ vertex_size *= 2; vertex_list = (struct vertex *)realloc(vertex_list,vertex_size * sizeof(struct vertex)); if(vertex_list == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return 0; } } vertex_list[vertex_length].number = v.number; vertex_list[vertex_length].n = v.n; vertex_list[vertex_length].start = v.start; g_type temp = vertex_length; vertex_length++; return temp; } int get_vertex(struct vertex *v){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return 0; } v->number = vertex_list[vertex_itr].number; v->start = vertex_list[vertex_itr].start; v->n = vertex_list[vertex_itr].n; vertex_itr++; if(vertex_itr >= 
vertex_size){ vertex_itr = vertex_itr % vertex_size; } return 1; } void reset_vertex(){ vertex_itr = 0; } void move_vertex(g_type index){ vertex_itr = index; } void build_graph(FILE *fp){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(fp == NULL){ fprintf(stderr,"File pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } g_type from,to; int seen = 0; g_type cur = -1; while(fscanf(fp,"%ld %ld",&from,&to) != -1){ if(from == vertex_list[cur].number && vertex_length != 0){ seen = 1; } else{ seen = 0; } if(!seen){ struct vertex temp; temp.number = from; temp.start = edges_length; temp.n = 0; cur = add_vertex(temp); } add_edge(to); vertex_list[cur].n++; } } void create_map(){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(node_map == NULL){ node_map = (struct map *)malloc(vertex_length * sizeof(struct map)); if(node_map == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(int i=0;i<vertex_length;i++){ node_map[i].node = vertex_list[i].number; node_map[i].index = i; } } g_type search_map(g_type node){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return -1; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return -1; } if(node_map == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return -1; } for(int i=0;i<vertex_length;i++){ if(node_map[i].node == node) return node_map[i].index; } return -1; } __device__ g_type search_dmap(struct map *d_map,g_type *d_vlength,g_type node){ if(d_map == NULL){ return -1; } g_type len = *d_vlength; for(g_type i=0;i<len;i++){ if(d_map[i].node == node) return d_map[i].index; } return -1; } void delete_map(){ if(node_map != NULL) free(node_map); } void init_ranks(){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(ranks == NULL){ ranks = (double *)malloc(vertex_length * sizeof(double)); if(ranks == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(int i=0;i<vertex_length;i++){ ranks[i] = 0.25; } } void delete_ranks(){ if(ranks != NULL) free(ranks); } void init_transitions(){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(transitions == NULL){ transitions = (struct entry *)malloc(edges_length * sizeof(struct entry)); if(transitions == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(g_type i=0;i<vertex_length;i++){ g_type start = vertex_list[i].start; g_type j = start; int n = vertex_list[i].n; while(j < start + n){ transitions[j].edge = edges[j]; transitions[j].val = 1.0 / vertex_list[i].n; j++; } } } void delete_transitions(){ if(transitions != NULL) free(transitions); } void init_result(){ if(vertex_list == NULL){ fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(edges == NULL){ 
fprintf(stderr,"Null pointer error in %s at line %d\n",__FILE__,__LINE__); return ; } if(result == NULL){ result = (double *)malloc(vertex_length * sizeof(double)); if(result == NULL){ fprintf(stderr,"Malloc failed in %s at line %d\n",__FILE__,__LINE__); return ; } } for(int i=0;i<vertex_length;i++){ result[i] = 0.0; } } void delete_result(){ if(result != NULL) free(result); } int pagerank(){ for(int i=0;i<vertex_length;i++){ for(int j = vertex_list[i].start; j < vertex_list[i].start + vertex_list[i].n; j++){ double temp = transitions[j].val * ranks[i]; g_type index = search_map(transitions[j].edge); result[index] += temp; } } return 1; } void update_ranks(){ for(int i=0;i<vertex_length;i++){ ranks[i] = result[i]; result[i] = 0.0; } } //end of interface //CUDA kernels __global__ void multiply_kernel(struct vertex *d_vertices,struct entry *d_transitions,struct map *d_map,double *d_ranks,double *d_tempranks,g_type *d_vlength){ int threadId = blockDim.x * blockIdx.x + threadIdx.x; double b = d_ranks[threadId]; g_type len = *d_vlength; if(threadId < len){ for(g_type i = d_vertices[threadId].start;i < d_vertices[threadId].start + d_vertices[threadId].n;i++){ double a = d_transitions[i].val; int index = search_dmap(d_map,d_vlength,d_transitions[i].edge); double res = a * b; double temp = d_tempranks[index]; __syncthreads(); temp += res; d_tempranks[index] = temp; __syncthreads(); } } } //deprecated __global__ void add_kernel(struct vertex *d_vertices,struct entry *d_transitions,double *d_res,struct map *d_map,double *d_tempranks,g_type *d_vlength){ int threadId = blockDim.x * blockIdx.x + threadIdx.x; g_type len = *d_vlength; if(threadId < len){ for(g_type i = d_vertices[threadId].start;i < d_vertices[threadId].start + d_vertices[threadId].n;i++){ int index = search_dmap(d_map,d_vlength,d_transitions[i].edge); double val = d_res[i]; double temp = d_tempranks[index]; __syncthreads(); temp += val; d_tempranks[index] = temp; __syncthreads(); } } } __global__ void update_kernel(double *d_tempranks,double *d_ranks,g_type *d_vlength){ int threadId = blockDim.x * blockIdx.x + threadIdx.x; g_type len = *d_vlength; if(threadId < len){ d_ranks[threadId] = d_tempranks[threadId]; } } //end of CUDA kernels //main program begins here int main(int argc,char **argv){ if(argc != 4){ fprintf(stderr,"Correct usage: %s <pathToGraph> <numIterations> <serial = 0/parallel = 1>\n",argv[0]); exit(1); } FILE *fp = fopen(argv[1],"r"); const int iterations = atoi(argv[2]); const int mode = atoi(argv[3]); init_vertices(); init_edges(); build_graph(fp); create_map(); init_ranks(); init_transitions(); if(mode == 1){ //initializing device memory g_type *d_elength; check_error(cudaMalloc((void **)&d_elength,sizeof(g_type))); check_error(cudaMemcpy(d_elength,&edges_length,sizeof(g_type),cudaMemcpyHostToDevice)); g_type *d_vlength; check_error(cudaMalloc((void **)&d_vlength,sizeof(g_type))); check_error(cudaMemcpy(d_vlength,&vertex_length,sizeof(g_type),cudaMemcpyHostToDevice)); struct vertex *d_vertices; check_error(cudaMalloc((void **)&d_vertices,vertex_length * sizeof(struct vertex))); check_error(cudaMemcpy(d_vertices,vertex_list,vertex_length * sizeof(struct vertex),cudaMemcpyHostToDevice)); struct entry *d_transitions; check_error(cudaMalloc((void **)&d_transitions,edges_length * sizeof(struct entry))); check_error(cudaMemcpy(d_transitions,transitions,edges_length * sizeof(struct entry),cudaMemcpyHostToDevice)); struct map *d_map; check_error(cudaMalloc((void **)&d_map,vertex_length * sizeof(struct map))); 
check_error(cudaMemcpy(d_map,node_map,vertex_length * sizeof(struct map),cudaMemcpyHostToDevice)); double *d_ranks; check_error(cudaMalloc((void **)&d_ranks,vertex_length * sizeof(double))); check_error(cudaMemcpy(d_ranks,ranks,vertex_length * sizeof(double),cudaMemcpyHostToDevice)); double *d_res; check_error(cudaMalloc((void **)&d_res,edges_length * sizeof(double))); double *d_tempranks; check_error(cudaMalloc((void **)&d_tempranks,vertex_length * sizeof(double))); //pagerank iterations begin here: Power method int blocks = 1; int threads = vertex_length; if(vertex_length > MAX_THREADS_PER_BLOCK){ blocks = (int)ceil(vertex_length / (double)MAX_THREADS_PER_BLOCK); threads = MAX_THREADS_PER_BLOCK; } int counter = 0; clock_t begin = clock(); while(counter < iterations){ check_error(cudaMemset(d_res,0.0,edges_length * sizeof(double))); check_error(cudaMemset(d_tempranks,0.0,vertex_length * sizeof(double))); multiply_kernel<<<blocks,threads>>>(d_vertices,d_transitions,d_map,d_ranks,d_tempranks,d_vlength); cudaDeviceSynchronize(); //add_kernel<<<blocks,threads>>>(d_vertices,d_transitions,d_res,d_map,d_tempranks,d_vlength); //cudaDeviceSynchronize(); update_kernel<<<blocks,threads>>>(d_tempranks,d_ranks,d_vlength); cudaDeviceSynchronize(); counter++; } clock_t end = clock(); //end of pagerank iterations double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; double *res; res = (double *)malloc(vertex_length * sizeof(double)); check_error(cudaMemcpy(res,d_ranks,vertex_length * sizeof(double),cudaMemcpyDeviceToHost)); for(int i = 0;i<vertex_length;i++){ printf("%lf\n",res[i]); } free(res); printf("%lf s\n",time_spent); check_error(cudaFree(d_elength)); check_error(cudaFree(d_vlength)); check_error(cudaFree(d_vertices)); check_error(cudaFree(d_transitions)); check_error(cudaFree(d_map)); check_error(cudaFree(d_ranks)); check_error(cudaFree(d_res)); check_error(cudaFree(d_tempranks)); } else{ clock_t begin = clock(); init_result(); int counter = 0; while(counter < iterations){ if(!pagerank()){ fprintf(stderr,"Pagerank failed in iteration: %d\n",counter); break; } update_ranks(); counter++; } clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; for(int i = 0;i<vertex_length;i++){ printf("%lf\n",ranks[i]); } printf("%lf s\n",time_spent); } //end of device memory initialization delete_edges(); delete_vertices(); delete_ranks(); delete_transitions(); delete_map(); delete_result(); return 0; }
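In both versions of the file above, multiply_kernel accumulates rank contributions into d_tempranks with a plain read-modify-write; __syncthreads() only synchronizes threads within a block, so threads in different blocks that target the same index can still lose updates. A minimal sketch of an atomics-based variant is given below, reusing the file's vertex, entry, map, g_type and search_dmap definitions; the kernel name and the bounds check on the returned index are additions, and double-precision atomicAdd requires a GPU of compute capability 6.0 or newer.

// Hypothetical replacement for multiply_kernel: contributions are scattered
// with atomicAdd so concurrent writes to the same d_tempranks entry are safe.
__global__ void multiply_kernel_atomic(struct vertex *d_vertices,
                                       struct entry *d_transitions,
                                       struct map *d_map,
                                       double *d_ranks,
                                       double *d_tempranks,
                                       g_type *d_vlength) {
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    g_type len = *d_vlength;
    if (threadId < len) {
        double b = d_ranks[threadId];
        for (g_type i = d_vertices[threadId].start;
             i < d_vertices[threadId].start + d_vertices[threadId].n; i++) {
            g_type index = search_dmap(d_map, d_vlength, d_transitions[i].edge);
            if (index >= 0)
                atomicAdd(&d_tempranks[index], d_transitions[i].val * b); // sm_60+ double atomicAdd
        }
    }
}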
f3cb7b57a2917cb51e6e08951272e96e9b9fbdef.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

/*
1. Sprinkle masses along the floor randomly.
2. Apply external forces according to an arbitrary 2d function/field (like a magnetic force) that makes them move along the floor in different directions and speeds according to the flow of the function.
3. When two masses touch there is a probability based on their velocities that they stick together permanently with a spring.
*/

// #include <hiprand/hiprand.h>
// #include <hiprand/hiprand_kernel.h>

#include <cstdio>
#include <iostream>
#include <thread>
#include <chrono>
using namespace std;

#include <sim.h>

#define random_double_number ((double) rand()/RAND_MAX)

// __device__ Vec random_walk(CUDA_MASS *m) {
//     Vec f;
//     f[0] = (int(m->pos[0]*1000))%2-2;
//     f[1] = (int(m->pos[1]*1000))%2-2;
//     return f;
// }

// Radial field that pulls masses towards the origin within the floor plane.
__device__ Vec sam_field_force(CUDA_MASS *m) {
    Vec f = -0.01 * m->pos / m->pos.norm();
    f[2] = 0;
    return f;
}

// Constant downward gravity.
__device__ Vec gravity_force(CUDA_MASS *m) {
    return m->m * Vec(0, 0, -9.81);
}

// Device-side pointers to the field functions; the host fetches their values
// with hipMemcpyFromSymbol before handing them to the simulation.
__device__ funcptr f_sam_field = sam_field_force;
__device__ funcptr f_gravity = gravity_force;

int main() {
    funcptr h_sam_field;
    funcptr h_gravity;

    if (hipSuccess != hipMemcpyFromSymbol(&h_sam_field, f_sam_field, sizeof(funcptr)))
        printf("FAILED to get SYMBOL\n");
    if (hipSuccess != hipMemcpyFromSymbol(&h_gravity, f_gravity, sizeof(funcptr)))
        printf("FAILED to get SYMBOL\n");

    Simulation sim;

#ifdef GRAPHICS
    sim.setViewport(Vec(0, 0, 40), Vec(0, 0, 0), Vec(0, 1, 0));
#endif

    sim.setGlobalAcceleration(Vec(0, 0, 0));
    sim.createPlane(Vec(0, 0, 1), 0);

    sim.createField(h_sam_field);
    sim.createField(h_gravity);

    for (unsigned i = 0; i < 100; i++) {
        Mass *m = sim.createMass(Vec(random_double_number * 100 - 50, random_double_number * 100 - 50, 0));
        //m->vel[1] = m->pos[0]*0.2 + random_double_number;
        //m->vel[0] = -m->pos[1]*0.2 - random_double_number;
        //m->vel[2] = 0;
        //m->damping = 0.99999;
    }

    sim.minimum_distance = 1;

    sim.start();

    while (1) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        sim.processCollision();
    }
}
f3cb7b57a2917cb51e6e08951272e96e9b9fbdef.cu
/*
1. Sprinkle masses along the floor randomly.
2. Apply external forces according to an arbitrary 2d function/field (like a magnetic force) that makes them move along the floor in different directions and speeds according to the flow of the function.
3. When two masses touch there is a probability based on their velocities that they stick together permanently with a spring.
*/

// #include <curand.h>
// #include <curand_kernel.h>

#include <cstdio>
#include <iostream>
#include <thread>
#include <chrono>
using namespace std;

#include <sim.h>

#define random_double_number ((double) rand()/RAND_MAX)

// __device__ Vec random_walk(CUDA_MASS *m) {
//     Vec f;
//     f[0] = (int(m->pos[0]*1000))%2-2;
//     f[1] = (int(m->pos[1]*1000))%2-2;
//     return f;
// }

// Radial field that pulls masses towards the origin within the floor plane.
__device__ Vec sam_field_force(CUDA_MASS *m) {
    Vec f = -0.01 * m->pos / m->pos.norm();
    f[2] = 0;
    return f;
}

// Constant downward gravity.
__device__ Vec gravity_force(CUDA_MASS *m) {
    return m->m * Vec(0, 0, -9.81);
}

// Device-side pointers to the field functions; the host fetches their values
// with cudaMemcpyFromSymbol before handing them to the simulation.
__device__ funcptr f_sam_field = sam_field_force;
__device__ funcptr f_gravity = gravity_force;

int main() {
    funcptr h_sam_field;
    funcptr h_gravity;

    if (cudaSuccess != cudaMemcpyFromSymbol(&h_sam_field, f_sam_field, sizeof(funcptr)))
        printf("FAILED to get SYMBOL\n");
    if (cudaSuccess != cudaMemcpyFromSymbol(&h_gravity, f_gravity, sizeof(funcptr)))
        printf("FAILED to get SYMBOL\n");

    Simulation sim;

#ifdef GRAPHICS
    sim.setViewport(Vec(0, 0, 40), Vec(0, 0, 0), Vec(0, 1, 0));
#endif

    sim.setGlobalAcceleration(Vec(0, 0, 0));
    sim.createPlane(Vec(0, 0, 1), 0);

    sim.createField(h_sam_field);
    sim.createField(h_gravity);

    for (unsigned i = 0; i < 100; i++) {
        Mass *m = sim.createMass(Vec(random_double_number * 100 - 50, random_double_number * 100 - 50, 0));
        //m->vel[1] = m->pos[0]*0.2 + random_double_number;
        //m->vel[0] = -m->pos[1]*0.2 - random_double_number;
        //m->vel[2] = 0;
        //m->damping = 0.99999;
    }

    sim.minimum_distance = 1;

    sim.start();

    while (1) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        sim.processCollision();
    }
}
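Both versions of this file fetch __device__ function pointers back to the host with hipMemcpyFromSymbol / cudaMemcpyFromSymbol so that sim.createField can later invoke an arbitrary force field on the GPU. A stripped-down, self-contained sketch of that pattern (independent of sim.h) follows; the names force_fn, gravity and apply_field are illustrative, not taken from the simulation library.

#include <cstdio>

// Signature of a per-element force evaluated on the device.
typedef float (*force_fn)(float);

// A concrete device function and a device-side pointer holding its address.
// The host cannot take &gravity directly; it copies the pointer value out
// of the device symbol g_gravity instead.
__device__ float gravity(float x) { return x - 9.81f; }
__device__ force_fn g_gravity = gravity;

// Kernel that calls whatever device function the pointer refers to.
__global__ void apply_field(force_fn f, float *data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = f(data[i]);
}

int main() {
    force_fn h_fn;
    // Fetch the device-side pointer value so it can be passed as a kernel argument.
    cudaMemcpyFromSymbol(&h_fn, g_gravity, sizeof(force_fn));

    float h[4] = {0.f, 1.f, 2.f, 3.f};
    float *d;
    cudaMalloc(&d, sizeof(h));
    cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice);

    apply_field<<<1, 4>>>(h_fn, d, 4);
    cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 4; i++) printf("%f\n", h[i]);
    cudaFree(d);
    return 0;
}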
df7b2ce38f652fb6c55e716d883d7b2c77a102c3.hip
// !!! This is a file automatically generated by hipify!!! #include "particles.cuh" #include "vec.h" #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> class DeviceVec; class BVHNode; static int gridSize; static int blockSize; static Particle *d_particles; static Particle *d_particles_out; // static int *d_particleIdxs; static unsigned int *mortonCodes; static unsigned int *d_morton_codes; static DeviceVec *d_forces; /** * 3D vector class for the device. */ class DeviceVec { public: // Member variables float x; float y; float z; public: // Methods __host__ __device__ DeviceVec(float x, float y, float z) : x(x), y(y), z(z) { } __host__ __device__ DeviceVec(DeviceVec *v) : x(v->x), y(v->y), z(v->z) { } __host__ __device__ DeviceVec(Vec3 *v) : x(v->x), y(v->y), z(v->z) { } __device__ float lengthSquared() { float sum = 0; sum += x * x; sum += y * y; sum += z * z; return sum; } __device__ float length() { return sqrt(lengthSquared()); } __device__ void toVec3(Vec3 *toFill) { toFill->x = x; toFill->y = y; toFill->z = z; } __device__ float dot(DeviceVec *other) { return x * other->x + y * other->y + z * other->z; } __device__ DeviceVec normalised() { float len = length(); DeviceVec normalised(x / len, y / len, z / len); return normalised; } __device__ DeviceVec *reflection(DeviceVec *normal) { DeviceVec tmp(x, y, z); tmp = tmp - (*normal * 2 * this->dot(normal)); return new DeviceVec(tmp); } // Operators __device__ DeviceVec operator+(const DeviceVec& other) { DeviceVec newVec(x + other.x, y + other.y, z + other.z); return newVec; } __device__ DeviceVec operator-(const DeviceVec& other) { DeviceVec newVec(x - other.x, y - other.y, z - other.z); return newVec; } __device__ DeviceVec operator-(const float scalar) { DeviceVec newVec(x - scalar, y - scalar, z - scalar); return newVec; } __device__ DeviceVec operator*(const DeviceVec& other) { DeviceVec newVec(x * other.x, y * other.y, z * other.z); return newVec; } __device__ DeviceVec operator*(const float scalar) { DeviceVec newVec(x * scalar, y * scalar, z * scalar); return newVec; } }; /** * Axis-Aligned Bounding Box (AABB) class. * Used for collision detection within the Bounding Volume * Hierarchy structure. 
*/ class AABB { private: DeviceVec centre; float width; float height; float depth; // Accessor methods (half dimensions are pre-computed // to provide a small level of optimisation) __host__ __device__ float getLeft(float halfWidth) { return centre.x - halfWidth; } __host__ __device__ float getRight(float halfWidth) { return centre.x + halfWidth; } __host__ __device__ float getTop(float halfHeight) { return centre.y + halfHeight; } __host__ __device__ float getBottom(float halfHeight) { return centre.y - halfHeight; } __host__ __device__ float getFront(float halfDepth) { return centre.z + halfDepth; } __host__ __device__ float getBack(float halfDepth) { return centre.z - halfDepth; } public: __host__ __device__ AABB() : centre(0,0,0), width(0), height(0), depth(0) { } __host__ __device__ AABB(DeviceVec centre, float width, float height, float depth) : centre(centre), width(width), height(height), depth(depth) { } __host__ __device__ static AABB fromParticle(Particle *p) { DeviceVec centre(&p->position); float diameter = p->radius * 2; // This is width, height, and depth return AABB(centre, diameter, diameter, diameter); } /** * Function for checking whether this AABB and another intersect */ __device__ bool intersects(AABB *other) { float halfWidth = width / 2; float oHalfWidth = other->width / 2; float halfHeight = height / 2; float oHalfHeight = other->height / 2; float halfDepth = depth / 2; float oHalfDepth = other->depth / 2; if (getRight(halfWidth) < other->getLeft(oHalfWidth)) return false; if (getLeft(halfWidth) > other->getRight(oHalfWidth)) return false; if (getBottom(halfHeight) > other->getTop(oHalfHeight)) return false; if (getTop(halfHeight) < other->getBottom(oHalfHeight)) return false; if (getFront(halfDepth) < other->getBack(oHalfDepth)) return false; if (getBack(halfDepth) > other->getFront(oHalfDepth)) return false; return true; } // Get the AABB that is found by combining two AABBs AABB aabbUnion(AABB other) { float halfWidth = width / 2; float oHalfWidth = other.width / 2; float halfHeight = height / 2; float oHalfHeight = other.height / 2; float halfDepth = depth / 2; float oHalfDepth = other.depth / 2; // Get the extreme values (leftmost, rightmost, topmost, etc.) from either AABB float left = min(getLeft(halfWidth), other.getLeft(oHalfWidth)); float right = max(getRight(halfWidth), other.getRight(oHalfWidth)); float top = max(getTop(halfHeight), other.getTop(oHalfHeight)); float bottom = min(getBottom(halfHeight), other.getBottom(oHalfHeight)); float front = max(getFront(halfDepth), other.getFront(oHalfDepth)); float back = min(getBack(halfDepth), other.getBack(oHalfDepth)); // Calculate new width, height and depth based on above calculation float newWidth = right - left; float newHeight = top - bottom; float newDepth = front - back; DeviceVec newCentre(left + newWidth/2, bottom + newHeight/2, back + newDepth/2); return AABB(newCentre, newWidth, newHeight, newDepth); } }; /** * Represents a node in a Bounding Volume Hierarchy (BVH). * The BVH is described by its root node, and is a binary * tree of AABBs. 
*/ struct BVHNode { int particleIdx; AABB boundingBox; int leftChildIdx, rightChildIdx; BVHNode() : particleIdx(-1), boundingBox(), leftChildIdx(-1), rightChildIdx(-1) { } // Constructor creates an internal (non-leaf) node BVHNode(AABB aabb, int l, int r) : particleIdx(-1), boundingBox(aabb), leftChildIdx(l), rightChildIdx(r) { } // Static function for creating a leaf node which directly represents a particle static BVHNode leafNode(Particle *p, int idx) { // Find the aabb bounding box for this particle // -1 represents there are no child nodes of this element BVHNode node(AABB::fromParticle(p), -1, -1); // Set the particle index node.particleIdx = idx; return node; } // Check whether a node is a leaf __device__ bool isLeaf() { // A node is leaf if both of it's child indexes are -1 return leftChildIdx == -1 && rightChildIdx == -1; } // Functions for checking the presence of individual children __device__ bool hasLeftChild() { return leftChildIdx != -1; } __device__ bool hasRightChild() { return rightChildIdx != -1; } }; static thrust::host_vector<BVHNode> nodes; static BVHNode *d_nodes; static int numBVHNodes; // Device function for checking whether two given particles collide __device__ bool particlesCollide(Particle *p1, Particle *p2) { // Find the vector between the two particles DeviceVec collideVec = DeviceVec(&(p2->position)) - DeviceVec(&(p1->position)); // Find the combined radius of the two particles float radiuses = p1->radius + p2->radius; float collideDistSq = radiuses * radiuses; // Particles collide if the distance between them is less // than their combined radiuses return collideVec.lengthSquared() <= collideDistSq; } // Resolve a collision between two particles. Adapted from CUDA samples __device__ void collide(Particle *p1, Particle *p2, DeviceVec *force) { DeviceVec posA(&p1->position); DeviceVec posB(&p2->position); DeviceVec velA(&p1->velocity); DeviceVec velB(&p2->velocity); // relative Position and velocity DeviceVec relPos = posB - posA; DeviceVec relVel = velB - velA; // Distance between the two particles float dist = relPos.length(); // Minimum distance for these particles to be colliding float collideDist = p1->radius + p2->radius; DeviceVec norm = relPos.normalised(); // New force is accumalated in the force parameter // spring force *force = *force - norm * 0.5f * (collideDist - dist); // damping force *force = *force + relVel * 0.02f; } /** * Recursive BVH Tree traversal */ __device__ void traverse( BVHNode node, BVHNode *nodes, AABB& queryAABB, Particle *particles, int particleIdx, DeviceVec* forces ) { if (node.boundingBox.intersects(&queryAABB)) { Particle* particle = &particles[particleIdx]; // Base case: Leaf node if (node.isLeaf()) { if (particleIdx != node.particleIdx && particlesCollide(particle, &particles[node.particleIdx])){ // Resolve collision. 
collide(particle, &particles[node.particleIdx], &forces[particleIdx]); } } else { // Recurse over left and right children as necessary if (node.hasLeftChild()) { BVHNode childL = nodes[node.leftChildIdx]; traverse(childL, nodes, queryAABB, particles, particleIdx, forces); } if (node.hasRightChild()) { BVHNode childR = nodes[node.rightChildIdx]; traverse(childR, nodes, queryAABB, particles, particleIdx, forces); } } } } /** * More efficient iterative traversal */ __device__ void traverseIterative( BVHNode *nodes, BVHNode root, AABB& queryAABB, Particle *particles, int particleIdx, DeviceVec* forces ) { Particle* particle = &particles[particleIdx]; // Stack for keeping track of which nodes to traverse next BVHNode* stack[64]; BVHNode** stackPtr = stack; // push to stak *stackPtr++ = NULL; // Start from the root BVHNode* node = &root; do { BVHNode *childL = node->hasLeftChild() ? &nodes[node->leftChildIdx] : NULL; BVHNode *childR = node->hasRightChild() ? &nodes[node->rightChildIdx] : NULL; // Check whether left and right children intersect with the particle in question // Also make sure we are not the not representing the partice itself bool overlapL = childL && childL->boundingBox.intersects(&queryAABB) && particleIdx != childL->particleIdx; bool overlapR = childR && childR->boundingBox.intersects(&queryAABB) && particleIdx != childR->particleIdx; // If there is an overlap and you are already a leaf then resolve the collision if (overlapL && childL->isLeaf()) collide(particle, &particles[childL->particleIdx], &forces[particleIdx]); if (overlapR && childR->isLeaf()) collide(particle, &particles[childR->particleIdx], &forces[particleIdx]); // If there is an overlap and you are not a leaf then traverse further bool traverseL = (overlapL && !childL->isLeaf()); bool traverseR = (overlapR && !childR->isLeaf()); if (!traverseL && !traverseR) // if nothing else to traverse, pop from stack node = *--stackPtr; else { // If left needs traversing, set node to be left child otherwise set to right child // This works because we already know that at least one of the nodes need traversing node = (traverseL) ? 
childL : childR; if (traverseL && traverseR) // If they actually both need traversing then push right child onto the stack *stackPtr++ = childR; } } while (node != NULL); } /** * Kernel for moving the particles */ __global__ void moveParticles(int bvhRootIdx, BVHNode *nodes, Particle *particles, Particle *out, int size, DeviceVec *forces) { int t_x = threadIdx.x; int b_x = blockIdx.x; int in_x = b_x * blockDim.x + t_x; if (in_x < size) { Particle thisParticle = particles[in_x]; DeviceVec newPosD(&thisParticle.position); DeviceVec velD(&thisParticle.velocity); // Initialise force for this particle forces[in_x] = DeviceVec(0, 0, 0); // Find the AABB query for the particle question AABB query = AABB::fromParticle(&thisParticle); // Traverse the BVH - querying for this particle traverseIterative( nodes, nodes[bvhRootIdx], query, particles, in_x, forces); __syncthreads(); velD = velD + forces[in_x]; // The below is the original (naive) collision detection method // DeviceVec force(0, 0, 0); // // for (int i = 0; i < size; i++) { // if (i != in_x) { // Don't consider ourselves // Particle other = particles[i]; // // if (particlesCollide(&thisParticle, &other)) { // collide(&thisParticle, &other, &force); // } // } // } // // velD = velD + force; // Calculate our new desired position newPosD = newPosD + velD; // Declare normal for wall collisions DeviceVec normalD(0, 0, 0); bool shouldReflect = false; // Set the reflection normal to the wall's normal, // if we're touching it if ((newPosD.x > 1 && velD.x > 0) || (newPosD.x < -1 && velD.x < 0)) { shouldReflect = true; normalD.x = 1; normalD.y = 0; normalD.z = 0; } if ((newPosD.y > 1 && velD.y > 0) || (newPosD.y < -1 && velD.y < 0)) { shouldReflect = true; normalD.x = 0; normalD.y = 1; normalD.z = 0; } if ((newPosD.z > 1 && velD.z > 0) || (newPosD.z < -1 && velD.z < 0)) { shouldReflect = true; normalD.x = 0; normalD.y = 0; normalD.z = 1; } if (shouldReflect) { // Reflect with respect to the wall's normal velD = velD.reflection(&normalD); } // Calculate the position after movement newPosD = DeviceVec(&thisParticle.position) + velD; newPosD.toVec3(&thisParticle.position); velD.toVec3(&thisParticle.velocity); // Move this particle out[in_x] = thisParticle; } } // Morton encoding functions taken from http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/ //////////////////////////////////////// // Expands a 10-bit integer into 30 bits // by inserting 2 zeros after each bit. __host__ __device__ unsigned int expandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } // Calculates a 30-bit Morton code for the // given 3D point located within the cube [-1,1]. 
__host__ __device__ unsigned int morton3D(Particle *p) { DeviceVec v(&p->position); // Shift to scale coordinates between 0 and 1 float x = (v.x + 1) / 2; float y = (v.y + 1) / 2; float z = (v.z + 1) / 2; x = min(max(x * 1024, 0.0f), 1023.0f); y = min(max(y * 1024, 0.0f), 1023.0f); z = min(max(z * 1024, 0.0f), 1023.0f); unsigned int xx = expandBits((unsigned int) x); unsigned int yy = expandBits((unsigned int) y); unsigned int zz = expandBits((unsigned int) z); return (xx * 4) + (yy * 2) + zz; } /** * GPU Kernel for creating an array of Morton codes from a Particle array */ __global__ void copyMortonCodes(Particle *particles, unsigned int *mortonCodes, int size) { int t_x = threadIdx.x; int b_x = blockIdx.x; int in_x = b_x * blockDim.x + t_x; if (in_x < size) { mortonCodes[in_x] = morton3D(&particles[in_x]); } } /** * Find the number of leading zeroes in the bit representation of an unsigned integer */ int leadingZeros(unsigned int n) { // Find how many bits are being used to represent the int int numBits = (int)sizeof(n) * 8; // Create a mask that will pass over the number // The mask starts as a 1 followed by a series of 0s (e.g. 10000000) // The 1 will then shift along to the right unsigned int mask = 1 << (numBits-1); int numZeros = 0; // performing bitwise AND of the number and the mask will indicate whether the number has a 1 in a given position while (((n & mask) == 0) && (mask > 0)) { // increment the count for each position where we haven't yet found a 1 numZeros++; // Shift the mask along by 1 bit mask >>= 1; } return numZeros; } /** * Recursive method for finding the ideal place to split a series of Morton codes * The ideal place is the furthest point at which there is the first difference in the most significant bit (unused in favour of the more efficient and reliable tutorial method). */ int splitSearch(unsigned int *sortedMortonCodes, unsigned int currentMSB, unsigned int currentBest, int first, int last) { // find the midpoint of the indexes int mid = first + ((last - first) + 1)/2; // See how many leading values are the same between the Morton codes at the start and the midpoint int msb = leadingZeros(sortedMortonCodes[0] ^ sortedMortonCodes[mid]); // If you have only 1 Morton code if (first == last) { // If this is an improvement if (msb > currentMSB) // Set this to the current best value currentBest = first; // return whatever the current best is return currentBest; } // If this is higher (the bit has already changed) search over the right hand values to see if you can go further but keeping mid as the current best if (msb > currentMSB) { return splitSearch(sortedMortonCodes, currentMSB, mid, mid + 1, last); } // otherwise too many bits have changed (gone too far) search to the left else { return splitSearch(sortedMortonCodes, currentMSB, currentBest, first, mid - 1); } } /** * This function taken directly from the tutorial (http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/). * Finds the ideal place to split a series of Morton codes. * The ideal place is the furthest point at which there is the first difference in the most significant bit. */ int findSplit(unsigned int *sortedMortonCodes, int first, int last) { // Identical Morton codes => split the range in the middle. 
unsigned int firstCode = sortedMortonCodes[first]; unsigned int lastCode = sortedMortonCodes[last]; if (firstCode == lastCode) return (first + last) >> 1; // Calculate the number of highest bits that are the same // for all objects, using the count-leading-zeros intrinsic. int commonPrefix = leadingZeros(firstCode ^ lastCode); // Use binary search to find where the next bit differs. // Specifically, we are looking for the highest object that // shares more than commonPrefix bits with the first one. int split = first; // initial guess int step = last - first; do { step = (step + 1) >> 1; // exponential decrease int newSplit = split + step; // proposed new position if (newSplit < last) { unsigned int splitCode = sortedMortonCodes[newSplit]; int splitPrefix = leadingZeros(firstCode ^ splitCode); if (splitPrefix > commonPrefix) split = newSplit; // accept proposal } } while (step > 1); return split; } /** * BVH generation adapted from the above link */ BVHNode generateBVH(unsigned int *sortedMortonCodes, Particle *particles, int *sortedParticleIdxs, int first, int last, int &numNodes, thrust::host_vector<BVHNode> &nodes) { numNodes++; // Base case: create a leaf node if (first == last) { BVHNode node = BVHNode::leafNode(&particles[sortedParticleIdxs[first]], sortedParticleIdxs[first]); nodes.push_back(node); return node; } // Find the point to split Morton codes for subtrees int splitIdx = findSplit(sortedMortonCodes, first, last); // Recursively generate subtrees for the split ranges BVHNode left = generateBVH(sortedMortonCodes, particles, sortedParticleIdxs, first, splitIdx, numNodes, nodes); int leftIdx = nodes.size() - 1; BVHNode right = generateBVH(sortedMortonCodes, particles, sortedParticleIdxs, splitIdx + 1, last, numNodes, nodes); int rightIdx = nodes.size() - 1; // Node contains union of left and right bounding boxes BVHNode node(left.boundingBox.aabbUnion(right.boundingBox), leftIdx, rightIdx); nodes.push_back(node); return node; } /* * The following 2 functions taken from the cuda samples */ int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } /** * Compute the ideal grid size for the number of particles that we have */ void computeGridSize(int n, int blockSize, int &numBlocks, int &numThreads) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } /** * Initialize CUDA - allocate memory, etc. 
*/ void cuda_init(Particle *particles, int numParticles) { mortonCodes = (unsigned int*)malloc(sizeof(unsigned int) * numParticles); // Initialise device memory for particles hipMalloc((void**) &d_particles, sizeof(Particle) * numParticles); hipMalloc((void**) &d_particles_out, sizeof(Particle) * numParticles); hipMalloc((void**) &d_forces, sizeof(DeviceVec) * numParticles); // hipMalloc((void**) &d_particleIdxs, sizeof(int) * numParticles); hipMalloc((void**) &d_morton_codes, sizeof(unsigned int) * numParticles); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Memory Allocation Error: %s\n", hipGetErrorString(err)); computeGridSize(numParticles, 256, gridSize, blockSize); } static int count = 0; /** * Update the particles - called */ void particles_update(Particle *particles, int particlesSize) { // copy host memory to device hipMemcpy(d_particles, particles, sizeof(Particle) * particlesSize, hipMemcpyHostToDevice); int *particleIdxs = (int*)malloc(sizeof(int) * particlesSize); for (int i = 0; i < particlesSize; i++) { particleIdxs[i] = i; } // hipMemcpy(d_particleIdxs, particleIdxs, sizeof(int) * particlesSize, hipMemcpyHostToDevice); hipLaunchKernelGGL(( copyMortonCodes), dim3(gridSize), dim3(blockSize), 0, 0, d_particles, d_morton_codes, particlesSize); // hipDeviceSynchronize(); hipError_t err = hipMemcpy(mortonCodes, d_morton_codes, sizeof(unsigned int) * particlesSize, hipMemcpyDeviceToHost); // Sort Particles by their Morton codes thrust::sort_by_key(mortonCodes, mortonCodes + particlesSize, particleIdxs); // Generate the BVH numBVHNodes = 0; nodes.clear(); BVHNode rootNode = generateBVH(mortonCodes, particles, particleIdxs, 0, particlesSize - 1, numBVHNodes, nodes); int rootIndex = nodes.size() - 1; free (particleIdxs); err = hipMalloc((void**) &d_nodes, sizeof(BVHNode) * numBVHNodes); if (err != hipSuccess) printf("[%d] Alloc Nodes Error: %s\n", count, hipGetErrorString(err)); err = hipMemcpy(d_nodes, thrust::raw_pointer_cast(&nodes[0]), sizeof(BVHNode) * nodes.size(), hipMemcpyHostToDevice); if (err != hipSuccess) printf("[%d] Copy Nodes Error: %s\n", count, hipGetErrorString(err)); // Kernel for calculating, and then moving the particles to, the new positions hipLaunchKernelGGL(( moveParticles), dim3(gridSize), dim3(blockSize), 0, 0, rootIndex, d_nodes, d_particles, d_particles_out, particlesSize, d_forces); hipMemcpy(particles, d_particles_out, sizeof(Particle) * particlesSize, hipMemcpyDeviceToHost); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) printf("[%d] Move Particles Error: %s\n", count++, hipGetErrorString(err)); // copy result from device to host hipFree(d_nodes); }
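morton3D in the file above rescales a position in [-1,1]^3 to three 10-bit integers and interleaves their bits with expandBits to form a 30-bit Morton code; findSplit then partitions particles wherever the highest differing bit changes. A small host-only check of that interleaving, reusing the same bit tricks, is sketched below; the test coordinates are arbitrary.

#include <cstdio>

// Same 10-bit -> 30-bit expansion as expandBits in the file above.
unsigned int expand_bits(unsigned int v) {
    v = (v * 0x00010001u) & 0xFF0000FFu;
    v = (v * 0x00000101u) & 0x0F00F00Fu;
    v = (v * 0x00000011u) & 0xC30C30C3u;
    v = (v * 0x00000005u) & 0x49249249u;
    return v;
}

// Interleave three 10-bit coordinates, mirroring morton3D's
// (xx * 4) + (yy * 2) + zz ordering.
unsigned int morton(unsigned int x, unsigned int y, unsigned int z) {
    return (expand_bits(x) << 2) | (expand_bits(y) << 1) | expand_bits(z);
}

int main() {
    // Nearby points share a long common prefix in their codes,
    // which is exactly what findSplit's leading-zeros test exploits.
    printf("%08x\n", morton(512, 512, 512)); // 38000000
    printf("%08x\n", morton(513, 512, 512)); // 38000004
    return 0;
}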
df7b2ce38f652fb6c55e716d883d7b2c77a102c3.cu
#include "particles.cuh" #include "vec.h" #include <stdio.h> #include <math.h> #include <cuda.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> class DeviceVec; class BVHNode; static int gridSize; static int blockSize; static Particle *d_particles; static Particle *d_particles_out; // static int *d_particleIdxs; static unsigned int *mortonCodes; static unsigned int *d_morton_codes; static DeviceVec *d_forces; /** * 3D vector class for the device. */ class DeviceVec { public: // Member variables float x; float y; float z; public: // Methods __host__ __device__ DeviceVec(float x, float y, float z) : x(x), y(y), z(z) { } __host__ __device__ DeviceVec(DeviceVec *v) : x(v->x), y(v->y), z(v->z) { } __host__ __device__ DeviceVec(Vec3 *v) : x(v->x), y(v->y), z(v->z) { } __device__ float lengthSquared() { float sum = 0; sum += x * x; sum += y * y; sum += z * z; return sum; } __device__ float length() { return sqrt(lengthSquared()); } __device__ void toVec3(Vec3 *toFill) { toFill->x = x; toFill->y = y; toFill->z = z; } __device__ float dot(DeviceVec *other) { return x * other->x + y * other->y + z * other->z; } __device__ DeviceVec normalised() { float len = length(); DeviceVec normalised(x / len, y / len, z / len); return normalised; } __device__ DeviceVec *reflection(DeviceVec *normal) { DeviceVec tmp(x, y, z); tmp = tmp - (*normal * 2 * this->dot(normal)); return new DeviceVec(tmp); } // Operators __device__ DeviceVec operator+(const DeviceVec& other) { DeviceVec newVec(x + other.x, y + other.y, z + other.z); return newVec; } __device__ DeviceVec operator-(const DeviceVec& other) { DeviceVec newVec(x - other.x, y - other.y, z - other.z); return newVec; } __device__ DeviceVec operator-(const float scalar) { DeviceVec newVec(x - scalar, y - scalar, z - scalar); return newVec; } __device__ DeviceVec operator*(const DeviceVec& other) { DeviceVec newVec(x * other.x, y * other.y, z * other.z); return newVec; } __device__ DeviceVec operator*(const float scalar) { DeviceVec newVec(x * scalar, y * scalar, z * scalar); return newVec; } }; /** * Axis-Aligned Bounding Box (AABB) class. * Used for collision detection within the Bounding Volume * Hierarchy structure. 
*/ class AABB { private: DeviceVec centre; float width; float height; float depth; // Accessor methods (half dimensions are pre-computed // to provide a small level of optimisation) __host__ __device__ float getLeft(float halfWidth) { return centre.x - halfWidth; } __host__ __device__ float getRight(float halfWidth) { return centre.x + halfWidth; } __host__ __device__ float getTop(float halfHeight) { return centre.y + halfHeight; } __host__ __device__ float getBottom(float halfHeight) { return centre.y - halfHeight; } __host__ __device__ float getFront(float halfDepth) { return centre.z + halfDepth; } __host__ __device__ float getBack(float halfDepth) { return centre.z - halfDepth; } public: __host__ __device__ AABB() : centre(0,0,0), width(0), height(0), depth(0) { } __host__ __device__ AABB(DeviceVec centre, float width, float height, float depth) : centre(centre), width(width), height(height), depth(depth) { } __host__ __device__ static AABB fromParticle(Particle *p) { DeviceVec centre(&p->position); float diameter = p->radius * 2; // This is width, height, and depth return AABB(centre, diameter, diameter, diameter); } /** * Function for checking whether this AABB and another intersect */ __device__ bool intersects(AABB *other) { float halfWidth = width / 2; float oHalfWidth = other->width / 2; float halfHeight = height / 2; float oHalfHeight = other->height / 2; float halfDepth = depth / 2; float oHalfDepth = other->depth / 2; if (getRight(halfWidth) < other->getLeft(oHalfWidth)) return false; if (getLeft(halfWidth) > other->getRight(oHalfWidth)) return false; if (getBottom(halfHeight) > other->getTop(oHalfHeight)) return false; if (getTop(halfHeight) < other->getBottom(oHalfHeight)) return false; if (getFront(halfDepth) < other->getBack(oHalfDepth)) return false; if (getBack(halfDepth) > other->getFront(oHalfDepth)) return false; return true; } // Get the AABB that is found by combining two AABBs AABB aabbUnion(AABB other) { float halfWidth = width / 2; float oHalfWidth = other.width / 2; float halfHeight = height / 2; float oHalfHeight = other.height / 2; float halfDepth = depth / 2; float oHalfDepth = other.depth / 2; // Get the extreme values (leftmost, rightmost, topmost, etc.) from either AABB float left = min(getLeft(halfWidth), other.getLeft(oHalfWidth)); float right = max(getRight(halfWidth), other.getRight(oHalfWidth)); float top = max(getTop(halfHeight), other.getTop(oHalfHeight)); float bottom = min(getBottom(halfHeight), other.getBottom(oHalfHeight)); float front = max(getFront(halfDepth), other.getFront(oHalfDepth)); float back = min(getBack(halfDepth), other.getBack(oHalfDepth)); // Calculate new width, height and depth based on above calculation float newWidth = right - left; float newHeight = top - bottom; float newDepth = front - back; DeviceVec newCentre(left + newWidth/2, bottom + newHeight/2, back + newDepth/2); return AABB(newCentre, newWidth, newHeight, newDepth); } }; /** * Represents a node in a Bounding Volume Hierarchy (BVH). * The BVH is described by its root node, and is a binary * tree of AABBs. 
*/ struct BVHNode { int particleIdx; AABB boundingBox; int leftChildIdx, rightChildIdx; BVHNode() : particleIdx(-1), boundingBox(), leftChildIdx(-1), rightChildIdx(-1) { } // Constructor creates an internal (non-leaf) node BVHNode(AABB aabb, int l, int r) : particleIdx(-1), boundingBox(aabb), leftChildIdx(l), rightChildIdx(r) { } // Static function for creating a leaf node which directly represents a particle static BVHNode leafNode(Particle *p, int idx) { // Find the aabb bounding box for this particle // -1 represents there are no child nodes of this element BVHNode node(AABB::fromParticle(p), -1, -1); // Set the particle index node.particleIdx = idx; return node; } // Check whether a node is a leaf __device__ bool isLeaf() { // A node is leaf if both of it's child indexes are -1 return leftChildIdx == -1 && rightChildIdx == -1; } // Functions for checking the presence of individual children __device__ bool hasLeftChild() { return leftChildIdx != -1; } __device__ bool hasRightChild() { return rightChildIdx != -1; } }; static thrust::host_vector<BVHNode> nodes; static BVHNode *d_nodes; static int numBVHNodes; // Device function for checking whether two given particles collide __device__ bool particlesCollide(Particle *p1, Particle *p2) { // Find the vector between the two particles DeviceVec collideVec = DeviceVec(&(p2->position)) - DeviceVec(&(p1->position)); // Find the combined radius of the two particles float radiuses = p1->radius + p2->radius; float collideDistSq = radiuses * radiuses; // Particles collide if the distance between them is less // than their combined radiuses return collideVec.lengthSquared() <= collideDistSq; } // Resolve a collision between two particles. Adapted from CUDA samples __device__ void collide(Particle *p1, Particle *p2, DeviceVec *force) { DeviceVec posA(&p1->position); DeviceVec posB(&p2->position); DeviceVec velA(&p1->velocity); DeviceVec velB(&p2->velocity); // relative Position and velocity DeviceVec relPos = posB - posA; DeviceVec relVel = velB - velA; // Distance between the two particles float dist = relPos.length(); // Minimum distance for these particles to be colliding float collideDist = p1->radius + p2->radius; DeviceVec norm = relPos.normalised(); // New force is accumalated in the force parameter // spring force *force = *force - norm * 0.5f * (collideDist - dist); // damping force *force = *force + relVel * 0.02f; } /** * Recursive BVH Tree traversal */ __device__ void traverse( BVHNode node, BVHNode *nodes, AABB& queryAABB, Particle *particles, int particleIdx, DeviceVec* forces ) { if (node.boundingBox.intersects(&queryAABB)) { Particle* particle = &particles[particleIdx]; // Base case: Leaf node if (node.isLeaf()) { if (particleIdx != node.particleIdx && particlesCollide(particle, &particles[node.particleIdx])){ // Resolve collision. 
collide(particle, &particles[node.particleIdx], &forces[particleIdx]); } } else { // Recurse over left and right children as necessary if (node.hasLeftChild()) { BVHNode childL = nodes[node.leftChildIdx]; traverse(childL, nodes, queryAABB, particles, particleIdx, forces); } if (node.hasRightChild()) { BVHNode childR = nodes[node.rightChildIdx]; traverse(childR, nodes, queryAABB, particles, particleIdx, forces); } } } } /** * More efficient iterative traversal */ __device__ void traverseIterative( BVHNode *nodes, BVHNode root, AABB& queryAABB, Particle *particles, int particleIdx, DeviceVec* forces ) { Particle* particle = &particles[particleIdx]; // Stack for keeping track of which nodes to traverse next BVHNode* stack[64]; BVHNode** stackPtr = stack; // push to stak *stackPtr++ = NULL; // Start from the root BVHNode* node = &root; do { BVHNode *childL = node->hasLeftChild() ? &nodes[node->leftChildIdx] : NULL; BVHNode *childR = node->hasRightChild() ? &nodes[node->rightChildIdx] : NULL; // Check whether left and right children intersect with the particle in question // Also make sure we are not the not representing the partice itself bool overlapL = childL && childL->boundingBox.intersects(&queryAABB) && particleIdx != childL->particleIdx; bool overlapR = childR && childR->boundingBox.intersects(&queryAABB) && particleIdx != childR->particleIdx; // If there is an overlap and you are already a leaf then resolve the collision if (overlapL && childL->isLeaf()) collide(particle, &particles[childL->particleIdx], &forces[particleIdx]); if (overlapR && childR->isLeaf()) collide(particle, &particles[childR->particleIdx], &forces[particleIdx]); // If there is an overlap and you are not a leaf then traverse further bool traverseL = (overlapL && !childL->isLeaf()); bool traverseR = (overlapR && !childR->isLeaf()); if (!traverseL && !traverseR) // if nothing else to traverse, pop from stack node = *--stackPtr; else { // If left needs traversing, set node to be left child otherwise set to right child // This works because we already know that at least one of the nodes need traversing node = (traverseL) ? 
childL : childR; if (traverseL && traverseR) // If they actually both need traversing then push right child onto the stack *stackPtr++ = childR; } } while (node != NULL); } /** * Kernel for moving the particles */ __global__ void moveParticles(int bvhRootIdx, BVHNode *nodes, Particle *particles, Particle *out, int size, DeviceVec *forces) { int t_x = threadIdx.x; int b_x = blockIdx.x; int in_x = b_x * blockDim.x + t_x; if (in_x < size) { Particle thisParticle = particles[in_x]; DeviceVec newPosD(&thisParticle.position); DeviceVec velD(&thisParticle.velocity); // Initialise force for this particle forces[in_x] = DeviceVec(0, 0, 0); // Find the AABB query for the particle question AABB query = AABB::fromParticle(&thisParticle); // Traverse the BVH - querying for this particle traverseIterative( nodes, nodes[bvhRootIdx], query, particles, in_x, forces); __syncthreads(); velD = velD + forces[in_x]; // The below is the original (naive) collision detection method // DeviceVec force(0, 0, 0); // // for (int i = 0; i < size; i++) { // if (i != in_x) { // Don't consider ourselves // Particle other = particles[i]; // // if (particlesCollide(&thisParticle, &other)) { // collide(&thisParticle, &other, &force); // } // } // } // // velD = velD + force; // Calculate our new desired position newPosD = newPosD + velD; // Declare normal for wall collisions DeviceVec normalD(0, 0, 0); bool shouldReflect = false; // Set the reflection normal to the wall's normal, // if we're touching it if ((newPosD.x > 1 && velD.x > 0) || (newPosD.x < -1 && velD.x < 0)) { shouldReflect = true; normalD.x = 1; normalD.y = 0; normalD.z = 0; } if ((newPosD.y > 1 && velD.y > 0) || (newPosD.y < -1 && velD.y < 0)) { shouldReflect = true; normalD.x = 0; normalD.y = 1; normalD.z = 0; } if ((newPosD.z > 1 && velD.z > 0) || (newPosD.z < -1 && velD.z < 0)) { shouldReflect = true; normalD.x = 0; normalD.y = 0; normalD.z = 1; } if (shouldReflect) { // Reflect with respect to the wall's normal velD = velD.reflection(&normalD); } // Calculate the position after movement newPosD = DeviceVec(&thisParticle.position) + velD; newPosD.toVec3(&thisParticle.position); velD.toVec3(&thisParticle.velocity); // Move this particle out[in_x] = thisParticle; } } // Morton encoding functions taken from http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/ //////////////////////////////////////// // Expands a 10-bit integer into 30 bits // by inserting 2 zeros after each bit. __host__ __device__ unsigned int expandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } // Calculates a 30-bit Morton code for the // given 3D point located within the cube [-1,1]. 
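// Note on the interleaving: expandBits places bit k of a 10-bit coordinate at
// bit position 3k, so the expanded x, y and z values can be merged without
// overlap (x ends up at bits 3k+2, y at 3k+1, z at 3k once combined as
// 4*xx + 2*yy + zz below). Sorting particles by the resulting code therefore
// orders them along a Z-order (Morton) curve, which is why spatially close
// particles end up in nearby leaves of the BVH built later.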
__host__ __device__ unsigned int morton3D(Particle *p) { DeviceVec v(&p->position); // Shift to scale coordinates between 0 and 1 float x = (v.x + 1) / 2; float y = (v.y + 1) / 2; float z = (v.z + 1) / 2; x = min(max(x * 1024, 0.0f), 1023.0f); y = min(max(y * 1024, 0.0f), 1023.0f); z = min(max(z * 1024, 0.0f), 1023.0f); unsigned int xx = expandBits((unsigned int) x); unsigned int yy = expandBits((unsigned int) y); unsigned int zz = expandBits((unsigned int) z); return (xx * 4) + (yy * 2) + zz; } /** * GPU Kernel for creating an array of Morton codes from a Particle array */ __global__ void copyMortonCodes(Particle *particles, unsigned int *mortonCodes, int size) { int t_x = threadIdx.x; int b_x = blockIdx.x; int in_x = b_x * blockDim.x + t_x; if (in_x < size) { mortonCodes[in_x] = morton3D(&particles[in_x]); } } /** * Find the number of leading zeroes in the bit representation of an unsigned integer */ int leadingZeros(unsigned int n) { // Find how many bits are being used to represent the int int numBits = (int)sizeof(n) * 8; // Create a mask that will pass over the number // The mask starts as a 1 followed by a series of 0s (e.g. 10000000) // The 1 will then shift along to the right unsigned int mask = 1 << (numBits-1); int numZeros = 0; // performing bitwise AND of the number and the mask will indicate whether the number has a 1 in a given position while (((n & mask) == 0) && (mask > 0)) { // increment the count for each position where we haven't yet found a 1 numZeros++; // Shift the mask along by 1 bit mask >>= 1; } return numZeros; } /** * Recursive method for finding the ideal place to split a series of Morton codes * The ideal place is the furthest point at which there is the first difference in the most significant bit (unused in favour of the more efficient and reliable tutorial method). */ int splitSearch(unsigned int *sortedMortonCodes, unsigned int currentMSB, unsigned int currentBest, int first, int last) { // find the midpoint of the indexes int mid = first + ((last - first) + 1)/2; // See how many leading values are the same between the Morton codes at the start and the midpoint int msb = leadingZeros(sortedMortonCodes[0] ^ sortedMortonCodes[mid]); // If you have only 1 Morton code if (first == last) { // If this is an improvement if (msb > currentMSB) // Set this to the current best value currentBest = first; // return whatever the current best is return currentBest; } // If this is higher (the bit has already changed) search over the right hand values to see if you can go further but keeping mid as the current best if (msb > currentMSB) { return splitSearch(sortedMortonCodes, currentMSB, mid, mid + 1, last); } // otherwise too many bits have changed (gone too far) search to the left else { return splitSearch(sortedMortonCodes, currentMSB, currentBest, first, mid - 1); } } /** * This function taken directly from the tutorial (http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/). * Finds the ideal place to split a series of Morton codes. * The ideal place is the furthest point at which there is the first difference in the most significant bit. */ int findSplit(unsigned int *sortedMortonCodes, int first, int last) { // Identical Morton codes => split the range in the middle. 
unsigned int firstCode = sortedMortonCodes[first]; unsigned int lastCode = sortedMortonCodes[last]; if (firstCode == lastCode) return (first + last) >> 1; // Calculate the number of highest bits that are the same // for all objects, using the count-leading-zeros intrinsic. int commonPrefix = leadingZeros(firstCode ^ lastCode); // Use binary search to find where the next bit differs. // Specifically, we are looking for the highest object that // shares more than commonPrefix bits with the first one. int split = first; // initial guess int step = last - first; do { step = (step + 1) >> 1; // exponential decrease int newSplit = split + step; // proposed new position if (newSplit < last) { unsigned int splitCode = sortedMortonCodes[newSplit]; int splitPrefix = leadingZeros(firstCode ^ splitCode); if (splitPrefix > commonPrefix) split = newSplit; // accept proposal } } while (step > 1); return split; } /** * BVH generation adapted from the above link */ BVHNode generateBVH(unsigned int *sortedMortonCodes, Particle *particles, int *sortedParticleIdxs, int first, int last, int &numNodes, thrust::host_vector<BVHNode> &nodes) { numNodes++; // Base case: create a leaf node if (first == last) { BVHNode node = BVHNode::leafNode(&particles[sortedParticleIdxs[first]], sortedParticleIdxs[first]); nodes.push_back(node); return node; } // Find the point to split Morton codes for subtrees int splitIdx = findSplit(sortedMortonCodes, first, last); // Recursively generate subtrees for the split ranges BVHNode left = generateBVH(sortedMortonCodes, particles, sortedParticleIdxs, first, splitIdx, numNodes, nodes); int leftIdx = nodes.size() - 1; BVHNode right = generateBVH(sortedMortonCodes, particles, sortedParticleIdxs, splitIdx + 1, last, numNodes, nodes); int rightIdx = nodes.size() - 1; // Node contains union of left and right bounding boxes BVHNode node(left.boundingBox.aabbUnion(right.boundingBox), leftIdx, rightIdx); nodes.push_back(node); return node; } /* * The following 2 functions taken from the cuda samples */ int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } /** * Compute the ideal grid size for the number of particles that we have */ void computeGridSize(int n, int blockSize, int &numBlocks, int &numThreads) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } /** * Initialize CUDA - allocate memory, etc. 
*/ void cuda_init(Particle *particles, int numParticles) { mortonCodes = (unsigned int*)malloc(sizeof(unsigned int) * numParticles); // Initialise device memory for particles cudaMalloc((void**) &d_particles, sizeof(Particle) * numParticles); cudaMalloc((void**) &d_particles_out, sizeof(Particle) * numParticles); cudaMalloc((void**) &d_forces, sizeof(DeviceVec) * numParticles); // cudaMalloc((void**) &d_particleIdxs, sizeof(int) * numParticles); cudaMalloc((void**) &d_morton_codes, sizeof(unsigned int) * numParticles); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Memory Allocation Error: %s\n", cudaGetErrorString(err)); computeGridSize(numParticles, 256, gridSize, blockSize); } static int count = 0; /** * Update the particles - called */ void particles_update(Particle *particles, int particlesSize) { // copy host memory to device cudaMemcpy(d_particles, particles, sizeof(Particle) * particlesSize, cudaMemcpyHostToDevice); int *particleIdxs = (int*)malloc(sizeof(int) * particlesSize); for (int i = 0; i < particlesSize; i++) { particleIdxs[i] = i; } // cudaMemcpy(d_particleIdxs, particleIdxs, sizeof(int) * particlesSize, cudaMemcpyHostToDevice); copyMortonCodes<<<gridSize, blockSize>>>(d_particles, d_morton_codes, particlesSize); // cudaThreadSynchronize(); cudaError_t err = cudaMemcpy(mortonCodes, d_morton_codes, sizeof(unsigned int) * particlesSize, cudaMemcpyDeviceToHost); // Sort Particles by their Morton codes thrust::sort_by_key(mortonCodes, mortonCodes + particlesSize, particleIdxs); // Generate the BVH numBVHNodes = 0; nodes.clear(); BVHNode rootNode = generateBVH(mortonCodes, particles, particleIdxs, 0, particlesSize - 1, numBVHNodes, nodes); int rootIndex = nodes.size() - 1; free (particleIdxs); err = cudaMalloc((void**) &d_nodes, sizeof(BVHNode) * numBVHNodes); if (err != cudaSuccess) printf("[%d] Alloc Nodes Error: %s\n", count, cudaGetErrorString(err)); err = cudaMemcpy(d_nodes, thrust::raw_pointer_cast(&nodes[0]), sizeof(BVHNode) * nodes.size(), cudaMemcpyHostToDevice); if (err != cudaSuccess) printf("[%d] Copy Nodes Error: %s\n", count, cudaGetErrorString(err)); // Kernel for calculating, and then moving the particles to, the new positions moveParticles<<<gridSize, blockSize>>>(rootIndex, d_nodes, d_particles, d_particles_out, particlesSize, d_forces); cudaMemcpy(particles, d_particles_out, sizeof(Particle) * particlesSize, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) printf("[%d] Move Particles Error: %s\n", count++, cudaGetErrorString(err)); // copy result from device to host cudaFree(d_nodes); }
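// A minimal, standalone host-side sketch (not part of the original file) of the
// Morton-ordering step that drives the BVH build above: positions in [-1,1]^3
// are mapped to 30-bit Morton codes and particle indices are then sorted by
// code, mirroring the thrust::sort_by_key call in particles_update. The helpers
// copy the expandBits/morton3D logic; the small Pt struct is a hypothetical
// stand-in for Particle::position.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Pt { float x, y, z; };

static unsigned int expandBits30(unsigned int v) {
	v = (v * 0x00010001u) & 0xFF0000FFu;
	v = (v * 0x00000101u) & 0x0F00F00Fu;
	v = (v * 0x00000011u) & 0xC30C30C3u;
	v = (v * 0x00000005u) & 0x49249249u;
	return v;
}

static unsigned int morton3D30(Pt p) {
	// Shift from [-1,1] to [0,1], scale to the 10-bit grid and clamp
	float x = std::min(std::max((p.x + 1.0f) * 0.5f * 1024.0f, 0.0f), 1023.0f);
	float y = std::min(std::max((p.y + 1.0f) * 0.5f * 1024.0f, 0.0f), 1023.0f);
	float z = std::min(std::max((p.z + 1.0f) * 0.5f * 1024.0f, 0.0f), 1023.0f);
	return 4 * expandBits30((unsigned int)x) + 2 * expandBits30((unsigned int)y) + expandBits30((unsigned int)z);
}

int main() {
	std::vector<Pt> pts = { {-0.9f, -0.9f, -0.9f}, {0.8f, 0.7f, 0.9f}, {-0.85f, -0.88f, -0.92f} };
	std::vector<int> idx = { 0, 1, 2 };
	std::vector<unsigned int> codes(pts.size());
	for (size_t i = 0; i < pts.size(); ++i) codes[i] = morton3D30(pts[i]);
	// Same effect as the thrust::sort_by_key(mortonCodes, ..., particleIdxs) call:
	std::sort(idx.begin(), idx.end(), [&](int a, int b) { return codes[a] < codes[b]; });
	for (int i : idx) printf("particle %d -> morton code %u\n", i, codes[i]);
	return 0;
}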
21d41badccbf4248f93a66831013658c17cb5b30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * TP 2 - Convolution d'images * -------------------------- * Mmoire constante et textures * * File: student.cu * Author: Maxime MARIA */ #include "student.hpp" #include "chronoGPU.hpp" namespace IMAC { // ================================================== For image comparison std::ostream &operator <<(std::ostream &os, const uchar4 &c) { os << "[" << uint(c.x) << "," << uint(c.y) << "," << uint(c.z) << "," << uint(c.w) << "]"; return os; } void compareImages(const std::vector<uchar4> &a, const std::vector<uchar4> &b) { bool error = false; if (a.size() != b.size()) { std::cout << "Size is different !" << std::endl; error = true; } else { for (uint i = 0; i < a.size(); ++i) { // Floating precision can cause small difference between host and device if ( std::abs(a[i].x - b[i].x) > 2 || std::abs(a[i].y - b[i].y) > 2 || std::abs(a[i].z - b[i].z) > 2 || std::abs(a[i].w - b[i].w) > 2) { std::cout << "Error at index " << i << ": a = " << a[i] << " - b = " << b[i] << " - " << std::abs(a[i].x - b[i].x) << std::endl; error = true; break; } } } if (error) { std::cout << " -> You failed, retry!" << std::endl; } else { std::cout << " -> Well done!" << std::endl; } } // ================================================== __device__ float clampfGPU(const float val, const float min , const float max) { return fminf(max, fmaxf(min, val)); } __global__ void convGPU(const uchar4 *input, const uint imgWidth, const uint imgHeight, const float *matConv, const uint matSize, uchar4 *output){ // id global en x const int idThreadGX = threadIdx.x // id du thread dans le block + blockIdx.x // id du block dans la grid * blockDim.x; // taille d'un block, nb threads dans blocks // nb threads global en x const int nbThreadsGX = blockDim.x * gridDim.x; // nb blocks dans grid // id global en y const int idThreadGY = threadIdx.y // id du thread dans le block + blockIdx.y // id du block dans la grid * blockDim.y; // taille d'un block, nb threads dans blocks // nb threads global en y const int nbThreadsGY = blockDim.y * gridDim.y; // nb blocks dans grid for (int idY = idThreadGY; idY < imgHeight; idY += nbThreadsGY) { for(int idX = idThreadGX; idX < imgWidth; idX += nbThreadsGX){ float3 sum = make_float3(0.f,0.f,0.f); // Apply convolution for ( uint j = 0; j < matSize; ++j ) { for ( uint i = 0; i < matSize; ++i ) { int dX = idX + i - matSize / 2; int dY = idY + j - matSize / 2; // Handle borders if ( dX < 0 ) dX = 0; if ( dX >= imgWidth ) dX = imgWidth - 1; if ( dY < 0 ) dY = 0; if ( dY >= imgHeight ) dY = imgHeight - 1; const int idMat = j * matSize + i; const int idPixel = dY * imgWidth + dX; sum.x += (float)input[idPixel].x * matConv[idMat]; sum.y += (float)input[idPixel].y * matConv[idMat]; sum.z += (float)input[idPixel].z * matConv[idMat]; } } const int idOut = idY * imgWidth + idX; output[idOut].x = (uchar)clampfGPU( sum.x, 0.f, 255.f ); output[idOut].y = (uchar)clampfGPU( sum.y, 0.f, 255.f ); output[idOut].z = (uchar)clampfGPU( sum.z, 0.f, 255.f ); output[idOut].w = 255; } } } void studentJob(const std::vector<uchar4> &inputImg, // Input image const uint imgWidth, const uint imgHeight, // Image size const std::vector<float> &matConv, // Convolution matrix (square) const uint matSize, // Matrix size (width or height) const std::vector<uchar4> &resultCPU, // Just for comparison std::vector<uchar4> &output // Output image ) { ChronoGPU chrGPU; // 3 arrays for GPU uchar4 *dev_inputImg = NULL; uchar4 *dev_output = NULL; float 
*dev_inputMat = NULL;

		std::cout << "Allocating 3 arrays: ";
		chrGPU.start();
		const size_t bytesImg = inputImg.size() * sizeof(uchar4);
		const size_t bytesMat = matConv.size() * sizeof(float);
		HANDLE_ERROR(hipMalloc((void **) &dev_inputImg, bytesImg));
		HANDLE_ERROR(hipMalloc((void **) &dev_output, bytesImg));
		HANDLE_ERROR(hipMalloc((void **) &dev_inputMat, bytesMat));
		chrGPU.stop();
		std::cout << "Allocation -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;

		std::cout << "Copying data to GPU : ";
		chrGPU.start();
		// Copy data from host to device (input arrays)
		HANDLE_ERROR(hipMemcpy(dev_inputImg, inputImg.data(), bytesImg, hipMemcpyHostToDevice));
		HANDLE_ERROR(hipMemcpy(dev_inputMat, matConv.data(), bytesMat, hipMemcpyHostToDevice));
		chrGPU.stop();
		std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;

		// Launch kernel
		chrGPU.start();
		std::cout << "Launching the kernel";
		hipLaunchKernelGGL(( convGPU), dim3(dim3(16, 16)), dim3(dim3(32, 32)), 0, 0, dev_inputImg, imgWidth, imgHeight, dev_inputMat, matSize, dev_output);
		chrGPU.stop();
		std::cout << "Calculations -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;

		std::cout << "Copying data to CPU : ";
		chrGPU.start();
		// Copy data from device to host (output array)
		HANDLE_ERROR(hipMemcpy(output.data(), dev_output, bytesImg, hipMemcpyDeviceToHost));
		chrGPU.stop();
		std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;

		compareImages(resultCPU, output);

		// Free arrays on device
		HANDLE_ERROR(hipFree(dev_inputImg));
		HANDLE_ERROR(hipFree(dev_output));
		HANDLE_ERROR(hipFree(dev_inputMat));
	}
}
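// A hedged side note, not part of the TP sources: convGPU uses 2D grid-stride
// loops, so the fixed dim3(16,16) x dim3(32,32) launch above already covers any
// image size. The sketch below shows the more common one-pixel-per-thread
// sizing instead; it assumes it lives in the same translation unit as convGPU,
// and the helper name is illustrative only.
namespace IMAC
{
	void launchConvOnePixelPerThread(const uchar4 *devIn, const uint imgWidth, const uint imgHeight,
	                                 const float *devMat, const uint matSize, uchar4 *devOut)
	{
		dim3 threads(32, 32);
		// Round up so the grid covers the whole image even when the image size
		// is not a multiple of the block size.
		dim3 blocks((imgWidth + threads.x - 1) / threads.x,
		            (imgHeight + threads.y - 1) / threads.y);
		hipLaunchKernelGGL(convGPU, blocks, threads, 0, 0,
		                   devIn, imgWidth, imgHeight, devMat, matSize, devOut);
	}
}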
21d41badccbf4248f93a66831013658c17cb5b30.cu
/* * TP 2 - Convolution d'images * -------------------------- * Mémoire constante et textures * * File: student.cu * Author: Maxime MARIA */ #include "student.hpp" #include "chronoGPU.hpp" namespace IMAC { // ================================================== For image comparison std::ostream &operator <<(std::ostream &os, const uchar4 &c) { os << "[" << uint(c.x) << "," << uint(c.y) << "," << uint(c.z) << "," << uint(c.w) << "]"; return os; } void compareImages(const std::vector<uchar4> &a, const std::vector<uchar4> &b) { bool error = false; if (a.size() != b.size()) { std::cout << "Size is different !" << std::endl; error = true; } else { for (uint i = 0; i < a.size(); ++i) { // Floating precision can cause small difference between host and device if ( std::abs(a[i].x - b[i].x) > 2 || std::abs(a[i].y - b[i].y) > 2 || std::abs(a[i].z - b[i].z) > 2 || std::abs(a[i].w - b[i].w) > 2) { std::cout << "Error at index " << i << ": a = " << a[i] << " - b = " << b[i] << " - " << std::abs(a[i].x - b[i].x) << std::endl; error = true; break; } } } if (error) { std::cout << " -> You failed, retry!" << std::endl; } else { std::cout << " -> Well done!" << std::endl; } } // ================================================== __device__ float clampfGPU(const float val, const float min , const float max) { return fminf(max, fmaxf(min, val)); } __global__ void convGPU(const uchar4 *input, const uint imgWidth, const uint imgHeight, const float *matConv, const uint matSize, uchar4 *output){ // id global en x const int idThreadGX = threadIdx.x // id du thread dans le block + blockIdx.x // id du block dans la grid * blockDim.x; // taille d'un block, nb threads dans blocks // nb threads global en x const int nbThreadsGX = blockDim.x * gridDim.x; // nb blocks dans grid // id global en y const int idThreadGY = threadIdx.y // id du thread dans le block + blockIdx.y // id du block dans la grid * blockDim.y; // taille d'un block, nb threads dans blocks // nb threads global en y const int nbThreadsGY = blockDim.y * gridDim.y; // nb blocks dans grid for (int idY = idThreadGY; idY < imgHeight; idY += nbThreadsGY) { for(int idX = idThreadGX; idX < imgWidth; idX += nbThreadsGX){ float3 sum = make_float3(0.f,0.f,0.f); // Apply convolution for ( uint j = 0; j < matSize; ++j ) { for ( uint i = 0; i < matSize; ++i ) { int dX = idX + i - matSize / 2; int dY = idY + j - matSize / 2; // Handle borders if ( dX < 0 ) dX = 0; if ( dX >= imgWidth ) dX = imgWidth - 1; if ( dY < 0 ) dY = 0; if ( dY >= imgHeight ) dY = imgHeight - 1; const int idMat = j * matSize + i; const int idPixel = dY * imgWidth + dX; sum.x += (float)input[idPixel].x * matConv[idMat]; sum.y += (float)input[idPixel].y * matConv[idMat]; sum.z += (float)input[idPixel].z * matConv[idMat]; } } const int idOut = idY * imgWidth + idX; output[idOut].x = (uchar)clampfGPU( sum.x, 0.f, 255.f ); output[idOut].y = (uchar)clampfGPU( sum.y, 0.f, 255.f ); output[idOut].z = (uchar)clampfGPU( sum.z, 0.f, 255.f ); output[idOut].w = 255; } } } void studentJob(const std::vector<uchar4> &inputImg, // Input image const uint imgWidth, const uint imgHeight, // Image size const std::vector<float> &matConv, // Convolution matrix (square) const uint matSize, // Matrix size (width or height) const std::vector<uchar4> &resultCPU, // Just for comparison std::vector<uchar4> &output // Output image ) { ChronoGPU chrGPU; // 3 arrays for GPU uchar4 *dev_inputImg = NULL; uchar4 *dev_output = NULL; float *dev_inputMat = NULL; std::cout << "Allocating 3 arrays: "; chrGPU.start(); const size_t bytesImg 
= inputImg.size() * sizeof(uchar4);
		const size_t bytesMat = matConv.size() * sizeof(float);
		HANDLE_ERROR(cudaMalloc((void **) &dev_inputImg, bytesImg));
		HANDLE_ERROR(cudaMalloc((void **) &dev_output, bytesImg));
		HANDLE_ERROR(cudaMalloc((void **) &dev_inputMat, bytesMat));
		chrGPU.stop();
		std::cout << "Allocation -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;

		std::cout << "Copying data to GPU : ";
		chrGPU.start();
		// Copy data from host to device (input arrays)
		HANDLE_ERROR(cudaMemcpy(dev_inputImg, inputImg.data(), bytesImg, cudaMemcpyHostToDevice));
		HANDLE_ERROR(cudaMemcpy(dev_inputMat, matConv.data(), bytesMat, cudaMemcpyHostToDevice));
		chrGPU.stop();
		std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;

		// Launch kernel
		chrGPU.start();
		std::cout << "Launching the kernel";
		convGPU<<<dim3(16, 16), dim3(32, 32)>>>(dev_inputImg, imgWidth, imgHeight, dev_inputMat, matSize, dev_output);
		chrGPU.stop();
		std::cout << "Calculations -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;

		std::cout << "Copying data to CPU : ";
		chrGPU.start();
		// Copy data from device to host (output array)
		HANDLE_ERROR(cudaMemcpy(output.data(), dev_output, bytesImg, cudaMemcpyDeviceToHost));
		chrGPU.stop();
		std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;

		compareImages(resultCPU, output);

		// Free arrays on device
		HANDLE_ERROR(cudaFree(dev_inputImg));
		HANDLE_ERROR(cudaFree(dev_output));
		HANDLE_ERROR(cudaFree(dev_inputMat));
	}
}
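// Hedged sketch, not part of the original student.cu: the file header mentions
// constant memory, yet the version above reads the convolution matrix from
// global memory. A __constant__ variant could look like the following; the
// 15x15 cap and the helper name are assumptions. The kernel would then read
// c_matConv[idMat] instead of matConv[idMat], letting the constant cache
// broadcast each coefficient to the whole warp.
namespace IMAC
{
	__constant__ float c_matConv[15 * 15];

	// Upload the convolution matrix once, before launching the kernel.
	void uploadConvMatrix(const std::vector<float> &matConv)
	{
		HANDLE_ERROR(cudaMemcpyToSymbol(c_matConv, matConv.data(),
		                                matConv.size() * sizeof(float)));
	}
}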
b53025847a41f1a4ca23ace723137b49f12d729f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "VeloUT.cuh" __global__ void veloUT( VeloUTTracking::HitsSoA* dev_ut_hits, int* dev_atomics_storage, uint* dev_velo_track_hit_number, VeloTracking::Hit<mc_check_enabled>* dev_velo_track_hits, VeloState* dev_velo_states, VeloUTTracking::TrackUT* dev_veloUT_tracks, int* dev_atomics_veloUT, PrUTMagnetTool* dev_ut_magnet_tool ) { const int number_of_events = gridDim.x; const int event_number = blockIdx.x; const int number_of_tracks_event = *(dev_atomics_storage + event_number); const int* accumulated_tracks_base_pointer = dev_atomics_storage + number_of_events; const int accumulated_tracks_event = accumulated_tracks_base_pointer[event_number]; VeloUTTracking::HitsSoA* hits_layers_event = dev_ut_hits + event_number; VeloState* velo_states_event = dev_velo_states + accumulated_tracks_event; /* dev_atomics_veloUT contains in an SoA: 1. # of veloUT tracks 2. # velo tracks in UT acceptance */ int* n_veloUT_tracks_event = dev_atomics_veloUT + event_number; VeloUTTracking::TrackUT* veloUT_tracks_event = dev_veloUT_tracks + event_number * VeloUTTracking::max_num_tracks; int* n_velo_tracks_in_UT_event = dev_atomics_veloUT + number_of_events + event_number; // initialize atomic veloUT tracks counter if ( threadIdx.x == 0 ) { *n_veloUT_tracks_event = 0; *n_velo_tracks_in_UT_event = 0; } __syncthreads(); int posLayers[4][85]; fillIterators(hits_layers_event, posLayers); const float* fudgeFactors = &(dev_ut_magnet_tool->dxLayTable[0]); const float* bdlTable = &(dev_ut_magnet_tool->bdlTable[0]); // array to store indices of selected hits in layers // -> can then access the hit information in the HitsSoA int hitCandidatesInLayers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer]; int n_hitCandidatesInLayers[VeloUTTracking::n_layers]; for ( int i = 0; i < (number_of_tracks_event + blockDim.x - 1) / blockDim.x; ++i) { const int i_track = i * blockDim.x + threadIdx.x; if ( i_track >= number_of_tracks_event ) continue; if ( velo_states_event[i_track].backward ) continue; if( !veloTrackInUTAcceptance( velo_states_event[i_track] ) ) continue; atomicAdd(n_velo_tracks_in_UT_event, 1); // for storing calculated x position of hits for this track float x_pos_layers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer]; for ( int i_layer = 0; i_layer < VeloUTTracking::n_layers; ++i_layer ) { n_hitCandidatesInLayers[i_layer] = 0; } if( !getHits( hitCandidatesInLayers, n_hitCandidatesInLayers, x_pos_layers, posLayers, hits_layers_event, fudgeFactors, velo_states_event[i_track] ) ) continue; TrackHelper helper(velo_states_event[i_track]); // indices within hitCandidatesInLayers for selected hits belonging to best track int hitCandidateIndices[VeloUTTracking::n_layers]; // go through UT layers in forward direction if( !formClusters( hitCandidatesInLayers, n_hitCandidatesInLayers, x_pos_layers, hitCandidateIndices, hits_layers_event, helper, true )){ // go through UT layers in backward direction formClusters( hitCandidatesInLayers, n_hitCandidatesInLayers, x_pos_layers, hitCandidateIndices, hits_layers_event, helper, false); } if ( helper.n_hits > 0 ) { prepareOutputTrack( dev_velo_track_hit_number, dev_velo_track_hits, accumulated_tracks_event, i_track, helper, hitCandidatesInLayers, n_hitCandidatesInLayers, hits_layers_event, x_pos_layers, hitCandidateIndices, veloUT_tracks_event, n_veloUT_tracks_event, bdlTable); } } // velo tracks }
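// Hedged host-side sketch, not part of VeloUT.cu: how the dev_atomics_veloUT
// SoA filled by the kernel above would typically be read back. The first
// number_of_events ints hold the veloUT track count per event; the next
// number_of_events ints hold how many velo tracks were inside the UT
// acceptance. Function and parameter names are illustrative only.
void readVeloUTCounters(
  const int* dev_atomics_veloUT,
  int number_of_events,
  int* n_veloUT_tracks,        // host buffer, number_of_events ints
  int* n_velo_tracks_in_UT     // host buffer, number_of_events ints
) {
  hipMemcpy(n_veloUT_tracks, dev_atomics_veloUT,
            number_of_events * sizeof(int), hipMemcpyDeviceToHost);
  hipMemcpy(n_velo_tracks_in_UT, dev_atomics_veloUT + number_of_events,
            number_of_events * sizeof(int), hipMemcpyDeviceToHost);
}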
b53025847a41f1a4ca23ace723137b49f12d729f.cu
#include "VeloUT.cuh" __global__ void veloUT( VeloUTTracking::HitsSoA* dev_ut_hits, int* dev_atomics_storage, uint* dev_velo_track_hit_number, VeloTracking::Hit<mc_check_enabled>* dev_velo_track_hits, VeloState* dev_velo_states, VeloUTTracking::TrackUT* dev_veloUT_tracks, int* dev_atomics_veloUT, PrUTMagnetTool* dev_ut_magnet_tool ) { const int number_of_events = gridDim.x; const int event_number = blockIdx.x; const int number_of_tracks_event = *(dev_atomics_storage + event_number); const int* accumulated_tracks_base_pointer = dev_atomics_storage + number_of_events; const int accumulated_tracks_event = accumulated_tracks_base_pointer[event_number]; VeloUTTracking::HitsSoA* hits_layers_event = dev_ut_hits + event_number; VeloState* velo_states_event = dev_velo_states + accumulated_tracks_event; /* dev_atomics_veloUT contains in an SoA: 1. # of veloUT tracks 2. # velo tracks in UT acceptance */ int* n_veloUT_tracks_event = dev_atomics_veloUT + event_number; VeloUTTracking::TrackUT* veloUT_tracks_event = dev_veloUT_tracks + event_number * VeloUTTracking::max_num_tracks; int* n_velo_tracks_in_UT_event = dev_atomics_veloUT + number_of_events + event_number; // initialize atomic veloUT tracks counter if ( threadIdx.x == 0 ) { *n_veloUT_tracks_event = 0; *n_velo_tracks_in_UT_event = 0; } __syncthreads(); int posLayers[4][85]; fillIterators(hits_layers_event, posLayers); const float* fudgeFactors = &(dev_ut_magnet_tool->dxLayTable[0]); const float* bdlTable = &(dev_ut_magnet_tool->bdlTable[0]); // array to store indices of selected hits in layers // -> can then access the hit information in the HitsSoA int hitCandidatesInLayers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer]; int n_hitCandidatesInLayers[VeloUTTracking::n_layers]; for ( int i = 0; i < (number_of_tracks_event + blockDim.x - 1) / blockDim.x; ++i) { const int i_track = i * blockDim.x + threadIdx.x; if ( i_track >= number_of_tracks_event ) continue; if ( velo_states_event[i_track].backward ) continue; if( !veloTrackInUTAcceptance( velo_states_event[i_track] ) ) continue; atomicAdd(n_velo_tracks_in_UT_event, 1); // for storing calculated x position of hits for this track float x_pos_layers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer]; for ( int i_layer = 0; i_layer < VeloUTTracking::n_layers; ++i_layer ) { n_hitCandidatesInLayers[i_layer] = 0; } if( !getHits( hitCandidatesInLayers, n_hitCandidatesInLayers, x_pos_layers, posLayers, hits_layers_event, fudgeFactors, velo_states_event[i_track] ) ) continue; TrackHelper helper(velo_states_event[i_track]); // indices within hitCandidatesInLayers for selected hits belonging to best track int hitCandidateIndices[VeloUTTracking::n_layers]; // go through UT layers in forward direction if( !formClusters( hitCandidatesInLayers, n_hitCandidatesInLayers, x_pos_layers, hitCandidateIndices, hits_layers_event, helper, true )){ // go through UT layers in backward direction formClusters( hitCandidatesInLayers, n_hitCandidatesInLayers, x_pos_layers, hitCandidateIndices, hits_layers_event, helper, false); } if ( helper.n_hits > 0 ) { prepareOutputTrack( dev_velo_track_hit_number, dev_velo_track_hits, accumulated_tracks_event, i_track, helper, hitCandidatesInLayers, n_hitCandidatesInLayers, hits_layers_event, x_pos_layers, hitCandidateIndices, veloUT_tracks_event, n_veloUT_tracks_event, bdlTable); } } // velo tracks }
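// Hedged illustration, not part of the original kernel: the work distribution
// used by veloUT above is one block per event, with threads striding over that
// event's tracks in chunks of blockDim.x. The toy kernel below only counts
// entries passing a cut, but the indexing pattern is the same.
__global__ void perEventStridedCount(
  const float* values,       // flattened per-event data
  const int* event_offsets,  // start of each event's range in `values`
  const int* event_sizes,    // number of entries per event
  int* counts                // one counter per event, zero-initialised
) {
  const int event_number = blockIdx.x;
  const int n = event_sizes[event_number];
  const float* v = values + event_offsets[event_number];
  for (int i = 0; i < (n + blockDim.x - 1) / blockDim.x; ++i) {
    const int j = i * blockDim.x + threadIdx.x;
    if (j >= n) continue;
    if (v[j] > 0.f) atomicAdd(&counts[event_number], 1);
  }
}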
2703db4ec9388c876085487cd5e30df263148e7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unistd.h> #include <stdio.h> /* we need these includes for CUDA's random number stuff */ #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> __global__ void farthestpointsamplingKernel(int b,int n,int m, const int *init, const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){ if (m<=0) return; const int BlockSize=512; __shared__ float dists[BlockSize]; __shared__ int dists_i[BlockSize]; const int BufferSize=3072; __shared__ float buf[BufferSize*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int old=init[i]; if (threadIdx.x==0) idxs[i*m+0]=old; for (int j=threadIdx.x;j<n;j+=blockDim.x){ temp[blockIdx.x*n+j]=1e38; } for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){ buf[j]=dataset[i*n*3+j]; } __syncthreads(); for (int j=1;j<m;j++){ int besti=0; float best=-1; float x1=dataset[i*n*3+old*3+0]; float y1=dataset[i*n*3+old*3+1]; float z1=dataset[i*n*3+old*3+2]; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float td=temp[blockIdx.x*n+k]; float x2,y2,z2; if (k<BufferSize){ x2=buf[k*3+0]; y2=buf[k*3+1]; z2=buf[k*3+2]; }else{ x2=dataset[i*n*3+k*3+0]; y2=dataset[i*n*3+k*3+1]; z2=dataset[i*n*3+k*3+2]; } float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1); float d2=min(d,td); if (d2!=td) temp[blockIdx.x*n+k]=d2; if (d2>best){ best=d2; besti=k; } } dists[threadIdx.x]=best; dists_i[threadIdx.x]=besti; for (int u=0;(1<<u)<blockDim.x;u++){ __syncthreads(); if (threadIdx.x<(blockDim.x>>(u+1))){ int i1=(threadIdx.x*2)<<u; int i2=(threadIdx.x*2+1)<<u; if (dists[i1]<dists[i2]){ dists[i1]=dists[i2]; dists_i[i1]=dists_i[i2]; } } } __syncthreads(); old=dists_i[0]; if (threadIdx.x==0) idxs[i*m+j]=old; } } } // input: radius (1), num (1), data_xyz (b,n,3), search_xyz (b,m,3) // output: idx (b,m,num), dist (b,m, num) __global__ void query_ball_knn_gpu(int b, int n, int m, float radius, int num, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, float *dist) { int batch_index = blockIdx.x; data_xyz += n*3*batch_index; search_xyz += m*3*batch_index; shuffled_ids += n*batch_index; idx += m*num*batch_index; dist += m*num*batch_index; // counting how many unique points selected in local region for (int j=threadIdx.x;j<m;j+=blockDim.x){ float search_x=search_xyz[j*3+0]; float search_y=search_xyz[j*3+1]; float search_z=search_xyz[j*3+2]; int sort_id = 0; float bigest= 0; bool is_full=false; for (int l=0;l<num;++l){ dist[j*num + l]=99999.0; idx[j*num + l]=0; } for (int k=0;k<n;++k) { //find the bigest and its id bigest = dist[j*num]; sort_id = 0; for (int l=1;l<num;++l){ if(dist[j*num + l]>bigest){ bigest = dist[j*num + l]; sort_id = l; } } if(bigest<radius){ is_full=true; break; } int kk= shuffled_ids[k]; float data_x=data_xyz[kk*3+0]; float data_y=data_xyz[kk*3+1]; float data_z=data_xyz[kk*3+2]; float d=max(sqrtf((data_x-search_x)*(data_x-search_x)+(data_y-search_y)*(data_y-search_y)+(data_z-search_z)*(data_z-search_z)),1e-20f); //replace the bigest one if(bigest>d){ dist[j*num + sort_id] = d; idx[j*num + sort_id] = kk; } } //if the nearghbors are less than k_num if (is_full || bigest<90000.0) continue; for(int k=0;k<num;++k){ if(dist[j*num + k]>90000.0){ dist[j*num + k] = dist[j*num]; idx[j*num + k] = idx[j*num]; } } } } __global__ void query_ball_gpu(int b, int n, int m, float radius, int nsample, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, int *pts_cnt) { int batch_index = blockIdx.x; data_xyz += 
n*3*batch_index; search_xyz += m*3*batch_index; idx += m*nsample*batch_index; pts_cnt += m*batch_index; // counting how many unique points selected in local region int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; float search_x=search_xyz[j*3+0]; float search_y=search_xyz[j*3+1]; float search_z=search_xyz[j*3+2]; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball int kk= shuffled_ids[k]; float data_x=data_xyz[kk*3+0]; float data_y=data_xyz[kk*3+1]; float data_z=data_xyz[kk*3+2]; float d=max(sqrtf((data_x-search_x)*(data_x-search_x)+(data_y-search_y)*(data_y-search_y)+(data_z-search_z)*(data_z-search_z)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = kk; } idx[j*nsample+cnt] = kk; cnt+=1; } } pts_cnt[j] = cnt; } } //k nearst search with cuda __global__ void knn_cuda(int b, int n, int m, int num, const float *data_xyz, const float *search_xyz, int *idx, float *dist) { int batch_index = blockIdx.x; data_xyz += n*3*batch_index; search_xyz += m*3*batch_index; idx += m*num*batch_index; dist += m*num*batch_index; for (int j=threadIdx.x;j<m;j+=blockDim.x){ float search_x=search_xyz[j*3+0]; float search_y=search_xyz[j*3+1]; float search_z=search_xyz[j*3+2]; int sort_id = 0; float bigest= 0; int tmp_id=0; float tmp_dist = 99999.0; for (int l=0;l<num;++l){ dist[j*num + l]=99999.0; idx[j*num + l]=0; } for (int k=0;k<n;++k) { float data_x=data_xyz[k*3+0]; float data_y=data_xyz[k*3+1]; float data_z=data_xyz[k*3+2]; float d=max(sqrtf((data_x-search_x)*(data_x-search_x)+(data_y-search_y)*(data_y-search_y)+(data_z-search_z)*(data_z-search_z)),1e-20f); if(d<1e-10f) { tmp_dist = 0.0; tmp_id = k; continue; } //find the bigest and its id bigest = dist[j*num]; sort_id = 0; for (int l=1;l<num;++l){ if(dist[j*num + l]>bigest){ bigest = dist[j*num + l]; sort_id = l; } } //replace the bigest one if(bigest>d){ dist[j*num + sort_id] = d; idx[j*num + sort_id] = k; } } //put itself into the results if(tmp_dist<1e-10f){ //find the bigest and its id bigest = dist[j*num]; sort_id = 0; for (int l=1;l<num;++l){ if(dist[j*num + l]>dist[j*num + l-1]){ bigest = dist[j*num + l]; sort_id = l; } } dist[j*num + sort_id] = 0.0; idx[j*num + sort_id] = tmp_id; } //if the nearghbors are less than k_num for(int k=0;k<num;++k){ if(dist[j*num + k]>90000.0){ dist[j*num + k] = dist[j*num]; idx[j*num + k] = idx[j*num]; } } } } /* this GPU kernel function is used to initialize the random states */ __global__ void init(unsigned int seed, hiprandState_t* states) { /* we have to initialize the state */ hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ blockIdx.x, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! 
*/ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[blockIdx.x]); } /* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */ __global__ void randoms(hiprandState_t* states, int* numbers, int upbound) { /* hiprand works like rand - except that it takes a state as a parameter */ numbers[blockIdx.x] = hiprand(&states[blockIdx.x]) % upbound; } void randomLauncher(int b, int *gpu_nums, int upbound){ /* CUDA's random number library uses hiprandState_t to keep track of the seed value we will store a random state for every thread */ hiprandState_t* states; /* allocate space on the GPU for the random states */ hipMalloc((void**) &states, b * sizeof(hiprandState_t)); /* invoke the GPU to initialize all of the random states */ hipLaunchKernelGGL(( init), dim3(b), dim3(1), 0, 0, time(0), states); /* allocate an array of unsigned ints on the CPU and GPU */ //int cpu_nums[b]; //unsigned int* gpu_nums; //hipMalloc((void**) &gpu_nums, b * sizeof(unsigned int)); /* invoke the kernel to get some random numbers */ hipLaunchKernelGGL(( randoms), dim3(b), dim3(1), 0, 0, states, gpu_nums, upbound); /* copy the random numbers back */ //hipMemcpy(cpu_nums, gpu_nums, b * sizeof(int), hipMemcpyDeviceToHost); /* print them out */ //for (int i = 0; i < b; i++) { // printf("%u\n", cpu_nums[i]); //} //printf("upbound: %u\n", upbound); /* free the memory we allocated for the states and numbers */ hipFree(states); //hipFree(gpu_nums); } void queryBallLauncher(int b, int n, int m, float radius, int k_num, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, int *pts_cnt) { hipLaunchKernelGGL(( query_ball_gpu), dim3(b),dim3(256), 0, 0, b,n,m,radius,k_num,shuffled_ids, data_xyz, search_xyz,idx,pts_cnt); //hipDeviceSynchronize(); } //require 32*n working space void farthestpointsamplingLauncher(int b,int n,int m,const int *init, const float * inp,float * temp,int * out){ hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(32),dim3(512), 0, 0, b,n,m,init, inp,temp,out); } void queryBallKnnLauncher(int b, int n, int m, float radius, int k_num, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, float *dist) { hipLaunchKernelGGL(( query_ball_knn_gpu), dim3(b),dim3(256), 0, 0, b,n,m,radius,k_num,shuffled_ids, data_xyz, search_xyz,idx,dist); //check_gpu<<<b,256>>>(b, n, m, nsample,idx); //hipDeviceSynchronize(); } void knnLauncher(int b, int n, int m, int k_num, const float *data_xyz, const float *search_xyz, int *idx, float *dist){ hipLaunchKernelGGL(( knn_cuda), dim3(b), dim3(512), 0, 0, b, n, m, k_num, data_xyz, search_xyz, idx, dist); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // input: inp (b,n,c), idx (b,m) // output: out (b,m,c) __global__ void gatherpointKernel(int b,int n,int m,int channel, const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){ int batch_index = blockIdx.x; inp += n*channel*batch_index; idx += m*batch_index; out += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a = idx[i]; for(int j=0;j<channel;++j) out[i*channel + j]=inp[a*channel + j]; } } __global__ void gatheridKernel(int b,int n,int m,int channel, const int * __restrict__ 
inp,const int * __restrict__ idx,int * __restrict__ out){ int batch_index = blockIdx.x; inp += n*channel*batch_index; idx += m*batch_index; out += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a = idx[i]; for(int j=0;j<channel;++j) out[i*channel + j]=inp[a*channel + j]; } } // input: out_g(b,m,c), idx (b,m) // output: inp_g(b,n,c) __global__ void scatteraddpointKernel(int b,int n,int m,int channel, const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){ int batch_index = blockIdx.x; inp_g += n*channel*batch_index; idx += m*batch_index; out_g += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a = idx[i]; for(int j=0;j<channel;j++) inp_g[a*channel + j] += out_g[i*channel + j]; } } __global__ void scatteraddidKernel(int b,int n,int m,int channel, const int * __restrict__ out_g,const int * __restrict__ idx,int * __restrict__ inp_g){ int batch_index = blockIdx.x; inp_g += n*channel*batch_index; idx += m*batch_index; out_g += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a = idx[i]; for(int j=0;j<channel;++j) atomicAdd(&inp_g[a*channel + j],out_g[i*channel + j]); } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; grad_points += n*c*batch_index; idx += m*nsample*batch_index; grad_out += m*c*nsample*batch_index; int nsc= c*nsample; for(int i=0;i<m;i++){ for(int j=0;j<nsample;j++){ int a = idx[i*nsample+j]; for(int k=threadIdx.x;k<c;k+=blockDim.x){ grad_points[a*c+k] += grad_out[i*nsc + j*c + k]; } } } } // input: eigvectors(eigvs): (b,n,3,3), idx: (b,n) // output: out (b,n,3) __global__ void gather_eigvector_gpu(int b, int n, const float *eigvs, const int *idx, float *out) { int batch_index = blockIdx.x; eigvs += n*9*batch_index; idx += n*batch_index; out += n*3*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<n;j+=stride) { int ii = idx[j]; out[j*3] = eigvs[j*9+ii]; out[j*3+1] = eigvs[j*9+ii+3]; out[j*3+2] = eigvs[j*9+ii+6]; } } // input: grad_out: (b,n,3), idx: (b,n) // output: grad_eigvs (b,n,3, 3) __global__ void gather_eigvector_grad_gpu(int b, int n, const float *grad_out, const int *idx, float *grad_eigvs) { int batch_index = blockIdx.x; grad_eigvs += n*9*batch_index; idx += n*batch_index; grad_out += n*3*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<n;j+=stride) { int ii = idx[j]; atomicAdd(&grad_eigvs[j*9+ii], grad_out[j*3]); atomicAdd(&grad_eigvs[j*9+ii+3], grad_out[j*3+1]); atomicAdd(&grad_eigvs[j*9+ii+6], grad_out[j*3+2]); } } void gatherpointLauncher(int b,int n,int m,int channel, const float * inp,const int * idx,float * out){ hipLaunchKernelGGL(( gatherpointKernel), dim3(b),dim3(256), 0, 0, b,n,m,channel, inp,idx,out); } void gatheridLauncher(int b,int n,int m,int channel, const int 
* inp,const int * idx,int * out){ hipLaunchKernelGGL(( gatheridKernel), dim3(b),dim3(256), 0, 0, b,n,m,channel,inp,idx,out); } void scatteraddpointLauncher(int b,int n,int m,int channel, const float * out_g,const int * idx,float * inp_g){ hipLaunchKernelGGL(( scatteraddpointKernel), dim3(b),dim3(256), 0, 0, b,n,m,channel,out_g,idx,inp_g); } void scatteraddidLauncher(int b,int n,int m,int channel, const int * out_g,const int * idx,int * inp_g){ hipLaunchKernelGGL(( scatteraddidKernel), dim3(b),dim3(256), 0, 0, b,n,m,channel,out_g,idx,inp_g); } void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ //group_point_gpu<<<dim3(b,nsample,1),256>>>(b,n,c,m,nsample,points,idx,out); hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,points,idx,out); //hipDeviceSynchronize(); } void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ hipLaunchKernelGGL(( group_point_grad_gpu), dim3(b),dim3(c), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points); //hipDeviceSynchronize(); } void gatherEigvectorLauncher(int b, int n, const float *eigvs, const int *idx, float *out){ hipLaunchKernelGGL(( gather_eigvector_gpu), dim3(b),dim3(256), 0, 0, b,n,eigvs,idx,out); //hipDeviceSynchronize(); } void gatherEigvectorGradLauncher(int b, int n, const float *grad_out, const int *idx, float *grad_eigvs){ hipLaunchKernelGGL(( gather_eigvector_grad_gpu), dim3(b),dim3(256), 0, 0, b,n,grad_out,idx,grad_eigvs); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // input: points (b,m,c), idx (b,n,3), weight (b,n,3) // output: out (b,n,c) __global__ void threeinterpolate_kernel(int b, int m, int c, int n, const float * __restrict__ points, const int * __restrict__ idx, const float * __restrict__ weight, float * out) { int batch_index = blockIdx.x; points += m*c*batch_index; idx += n*3*batch_index; weight += n*3*batch_index; out += n*c*batch_index; float w1,w2,w3; int i1,i2,i3; for (int i=threadIdx.x;i<n;i+=blockDim.x){ w1=weight[i*3]; w2=weight[i*3 + 1]; w3=weight[i*3 + 2]; i1=idx[i*3]; i2=idx[i*3 + 1]; i3=idx[i*3 + 2]; for (int j=0;j<c;++j) { out[i*c+j] = points[i1*c+j]*w1 + points[i2*c+j]*w2 + points[i3*c+j]*w3; } } } // input: grad_out (b,n,c), idx (b,n,3), weight (b,n,3) // output: grad_points (b,m,c) __global__ void threeinterpolate_grad_kernel(int b, int n, int c, int m, const float * __restrict__ grad_out, const int * __restrict__ idx, const float * __restrict__ weight, float * grad_points) { int batch_index = blockIdx.x; grad_points += m*c*batch_index; idx += n*3*batch_index; weight += n*3*batch_index; grad_out += n*c*batch_index; float w1,w2,w3; int i1,i2,i3; for (int i=0;i<n;i++){ w1=weight[i*3]; w2=weight[i*3 + 1]; w3=weight[i*3 + 2]; i1=idx[i*3]; i2=idx[i*3 + 1]; i3=idx[i*3 + 2]; for (int j=threadIdx.x;j<c;j+=blockDim.x) { grad_points[i1*c+j] += grad_out[i*c+j]*w1; grad_points[i2*c+j] += grad_out[i*c+j]*w2; grad_points[i3*c+j] += grad_out[i*c+j]*w3; } } } void threeinterpolateLauncher(int b, int m, int c, int n, const float *points, const int *idx, const float *weight, float *out) { hipLaunchKernelGGL(( threeinterpolate_kernel), dim3(b), dim3(256), 0, 0, b, m, c, n, points, idx, weight, out); } void 
threeinterpolategradLauncher(int b, int n, int c, int m, const float *grad_out, const int *idx, const float *weight, float *grad_points) { hipLaunchKernelGGL(( threeinterpolate_grad_kernel), dim3(b), dim3(c), 0, 0, b, n, c, m, grad_out, idx, weight, grad_points); }
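// Hedged sketch, not part of the original ops: how the idx/weight inputs
// consumed by threeinterpolate_kernel are typically produced. For each query
// point the three nearest reference points are found, and normalized inverse
// distances become the weights, so the interpolated feature is
// w1*f[i1] + w2*f[i2] + w3*f[i3] with w1 + w2 + w3 = 1.
void threeNNWeights(const float d[3], float w[3])
{
	// d[k] are distances to the three nearest neighbours, assumed strictly
	// positive (e.g. clamped with max(d, 1e-20f) as the kernels above do).
	float inv[3], sum = 0.f;
	for (int k = 0; k < 3; ++k) { inv[k] = 1.f / d[k]; sum += inv[k]; }
	for (int k = 0; k < 3; ++k) w[k] = inv[k] / sum;
}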
2703db4ec9388c876085487cd5e30df263148e7d.cu
#include <unistd.h> #include <stdio.h> /* we need these includes for CUDA's random number stuff */ #include <curand.h> #include <curand_kernel.h> __global__ void farthestpointsamplingKernel(int b,int n,int m, const int *init, const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){ if (m<=0) return; const int BlockSize=512; __shared__ float dists[BlockSize]; __shared__ int dists_i[BlockSize]; const int BufferSize=3072; __shared__ float buf[BufferSize*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int old=init[i]; if (threadIdx.x==0) idxs[i*m+0]=old; for (int j=threadIdx.x;j<n;j+=blockDim.x){ temp[blockIdx.x*n+j]=1e38; } for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){ buf[j]=dataset[i*n*3+j]; } __syncthreads(); for (int j=1;j<m;j++){ int besti=0; float best=-1; float x1=dataset[i*n*3+old*3+0]; float y1=dataset[i*n*3+old*3+1]; float z1=dataset[i*n*3+old*3+2]; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float td=temp[blockIdx.x*n+k]; float x2,y2,z2; if (k<BufferSize){ x2=buf[k*3+0]; y2=buf[k*3+1]; z2=buf[k*3+2]; }else{ x2=dataset[i*n*3+k*3+0]; y2=dataset[i*n*3+k*3+1]; z2=dataset[i*n*3+k*3+2]; } float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1); float d2=min(d,td); if (d2!=td) temp[blockIdx.x*n+k]=d2; if (d2>best){ best=d2; besti=k; } } dists[threadIdx.x]=best; dists_i[threadIdx.x]=besti; for (int u=0;(1<<u)<blockDim.x;u++){ __syncthreads(); if (threadIdx.x<(blockDim.x>>(u+1))){ int i1=(threadIdx.x*2)<<u; int i2=(threadIdx.x*2+1)<<u; if (dists[i1]<dists[i2]){ dists[i1]=dists[i2]; dists_i[i1]=dists_i[i2]; } } } __syncthreads(); old=dists_i[0]; if (threadIdx.x==0) idxs[i*m+j]=old; } } } // input: radius (1), num (1), data_xyz (b,n,3), search_xyz (b,m,3) // output: idx (b,m,num), dist (b,m, num) __global__ void query_ball_knn_gpu(int b, int n, int m, float radius, int num, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, float *dist) { int batch_index = blockIdx.x; data_xyz += n*3*batch_index; search_xyz += m*3*batch_index; shuffled_ids += n*batch_index; idx += m*num*batch_index; dist += m*num*batch_index; // counting how many unique points selected in local region for (int j=threadIdx.x;j<m;j+=blockDim.x){ float search_x=search_xyz[j*3+0]; float search_y=search_xyz[j*3+1]; float search_z=search_xyz[j*3+2]; int sort_id = 0; float bigest= 0; bool is_full=false; for (int l=0;l<num;++l){ dist[j*num + l]=99999.0; idx[j*num + l]=0; } for (int k=0;k<n;++k) { //find the bigest and its id bigest = dist[j*num]; sort_id = 0; for (int l=1;l<num;++l){ if(dist[j*num + l]>bigest){ bigest = dist[j*num + l]; sort_id = l; } } if(bigest<radius){ is_full=true; break; } int kk= shuffled_ids[k]; float data_x=data_xyz[kk*3+0]; float data_y=data_xyz[kk*3+1]; float data_z=data_xyz[kk*3+2]; float d=max(sqrtf((data_x-search_x)*(data_x-search_x)+(data_y-search_y)*(data_y-search_y)+(data_z-search_z)*(data_z-search_z)),1e-20f); //replace the bigest one if(bigest>d){ dist[j*num + sort_id] = d; idx[j*num + sort_id] = kk; } } //if the nearghbors are less than k_num if (is_full || bigest<90000.0) continue; for(int k=0;k<num;++k){ if(dist[j*num + k]>90000.0){ dist[j*num + k] = dist[j*num]; idx[j*num + k] = idx[j*num]; } } } } __global__ void query_ball_gpu(int b, int n, int m, float radius, int nsample, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, int *pts_cnt) { int batch_index = blockIdx.x; data_xyz += n*3*batch_index; search_xyz += m*3*batch_index; idx += m*nsample*batch_index; pts_cnt += m*batch_index; // 
counting how many unique points selected in local region int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; float search_x=search_xyz[j*3+0]; float search_y=search_xyz[j*3+1]; float search_z=search_xyz[j*3+2]; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball int kk= shuffled_ids[k]; float data_x=data_xyz[kk*3+0]; float data_y=data_xyz[kk*3+1]; float data_z=data_xyz[kk*3+2]; float d=max(sqrtf((data_x-search_x)*(data_x-search_x)+(data_y-search_y)*(data_y-search_y)+(data_z-search_z)*(data_z-search_z)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = kk; } idx[j*nsample+cnt] = kk; cnt+=1; } } pts_cnt[j] = cnt; } } //k nearst search with cuda __global__ void knn_cuda(int b, int n, int m, int num, const float *data_xyz, const float *search_xyz, int *idx, float *dist) { int batch_index = blockIdx.x; data_xyz += n*3*batch_index; search_xyz += m*3*batch_index; idx += m*num*batch_index; dist += m*num*batch_index; for (int j=threadIdx.x;j<m;j+=blockDim.x){ float search_x=search_xyz[j*3+0]; float search_y=search_xyz[j*3+1]; float search_z=search_xyz[j*3+2]; int sort_id = 0; float bigest= 0; int tmp_id=0; float tmp_dist = 99999.0; for (int l=0;l<num;++l){ dist[j*num + l]=99999.0; idx[j*num + l]=0; } for (int k=0;k<n;++k) { float data_x=data_xyz[k*3+0]; float data_y=data_xyz[k*3+1]; float data_z=data_xyz[k*3+2]; float d=max(sqrtf((data_x-search_x)*(data_x-search_x)+(data_y-search_y)*(data_y-search_y)+(data_z-search_z)*(data_z-search_z)),1e-20f); if(d<1e-10f) { tmp_dist = 0.0; tmp_id = k; continue; } //find the bigest and its id bigest = dist[j*num]; sort_id = 0; for (int l=1;l<num;++l){ if(dist[j*num + l]>bigest){ bigest = dist[j*num + l]; sort_id = l; } } //replace the bigest one if(bigest>d){ dist[j*num + sort_id] = d; idx[j*num + sort_id] = k; } } //put itself into the results if(tmp_dist<1e-10f){ //find the bigest and its id bigest = dist[j*num]; sort_id = 0; for (int l=1;l<num;++l){ if(dist[j*num + l]>dist[j*num + l-1]){ bigest = dist[j*num + l]; sort_id = l; } } dist[j*num + sort_id] = 0.0; idx[j*num + sort_id] = tmp_id; } //if the nearghbors are less than k_num for(int k=0;k<num;++k){ if(dist[j*num + k]>90000.0){ dist[j*num + k] = dist[j*num]; idx[j*num + k] = idx[j*num]; } } } } /* this GPU kernel function is used to initialize the random states */ __global__ void init(unsigned int seed, curandState_t* states) { /* we have to initialize the state */ curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ blockIdx.x, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! 
*/ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[blockIdx.x]); } /* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */ __global__ void randoms(curandState_t* states, int* numbers, int upbound) { /* curand works like rand - except that it takes a state as a parameter */ numbers[blockIdx.x] = curand(&states[blockIdx.x]) % upbound; } void randomLauncher(int b, int *gpu_nums, int upbound){ /* CUDA's random number library uses curandState_t to keep track of the seed value we will store a random state for every thread */ curandState_t* states; /* allocate space on the GPU for the random states */ cudaMalloc((void**) &states, b * sizeof(curandState_t)); /* invoke the GPU to initialize all of the random states */ init<<<b, 1>>>(time(0), states); /* allocate an array of unsigned ints on the CPU and GPU */ //int cpu_nums[b]; //unsigned int* gpu_nums; //cudaMalloc((void**) &gpu_nums, b * sizeof(unsigned int)); /* invoke the kernel to get some random numbers */ randoms<<<b, 1>>>(states, gpu_nums, upbound); /* copy the random numbers back */ //cudaMemcpy(cpu_nums, gpu_nums, b * sizeof(int), cudaMemcpyDeviceToHost); /* print them out */ //for (int i = 0; i < b; i++) { // printf("%u\n", cpu_nums[i]); //} //printf("upbound: %u\n", upbound); /* free the memory we allocated for the states and numbers */ cudaFree(states); //cudaFree(gpu_nums); } void queryBallLauncher(int b, int n, int m, float radius, int k_num, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, int *pts_cnt) { query_ball_gpu<<<b,256>>>(b,n,m,radius,k_num,shuffled_ids, data_xyz, search_xyz,idx,pts_cnt); //cudaDeviceSynchronize(); } //require 32*n working space void farthestpointsamplingLauncher(int b,int n,int m,const int *init, const float * inp,float * temp,int * out){ farthestpointsamplingKernel<<<32,512>>>(b,n,m,init, inp,temp,out); } void queryBallKnnLauncher(int b, int n, int m, float radius, int k_num, const int *shuffled_ids, const float *data_xyz, const float *search_xyz, int *idx, float *dist) { query_ball_knn_gpu<<<b,256>>>(b,n,m,radius,k_num,shuffled_ids, data_xyz, search_xyz,idx,dist); //check_gpu<<<b,256>>>(b, n, m, nsample,idx); //cudaDeviceSynchronize(); } void knnLauncher(int b, int n, int m, int k_num, const float *data_xyz, const float *search_xyz, int *idx, float *dist){ knn_cuda<<<b, 512>>>(b, n, m, k_num, data_xyz, search_xyz, idx, dist); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // input: inp (b,n,c), idx (b,m) // output: out (b,m,c) __global__ void gatherpointKernel(int b,int n,int m,int channel, const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){ int batch_index = blockIdx.x; inp += n*channel*batch_index; idx += m*batch_index; out += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a = idx[i]; for(int j=0;j<channel;++j) out[i*channel + j]=inp[a*channel + j]; } } __global__ void gatheridKernel(int b,int n,int m,int channel, const int * __restrict__ inp,const int * __restrict__ idx,int * __restrict__ out){ int batch_index = blockIdx.x; inp += n*channel*batch_index; idx += m*batch_index; out += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a 
= idx[i]; for(int j=0;j<channel;++j) out[i*channel + j]=inp[a*channel + j]; } } // input: out_g(b,m,c), idx (b,m) // output: inp_g(b,n,c) __global__ void scatteraddpointKernel(int b,int n,int m,int channel, const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){ int batch_index = blockIdx.x; inp_g += n*channel*batch_index; idx += m*batch_index; out_g += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a = idx[i]; for(int j=0;j<channel;j++) inp_g[a*channel + j] += out_g[i*channel + j]; } } __global__ void scatteraddidKernel(int b,int n,int m,int channel, const int * __restrict__ out_g,const int * __restrict__ idx,int * __restrict__ inp_g){ int batch_index = blockIdx.x; inp_g += n*channel*batch_index; idx += m*batch_index; out_g += m*channel*batch_index; for(int i = threadIdx.x;i<m;i+=blockDim.x){ int a = idx[i]; for(int j=0;j<channel;++j) atomicAdd(&inp_g[a*channel + j],out_g[i*channel + j]); } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; grad_points += n*c*batch_index; idx += m*nsample*batch_index; grad_out += m*c*nsample*batch_index; int nsc= c*nsample; for(int i=0;i<m;i++){ for(int j=0;j<nsample;j++){ int a = idx[i*nsample+j]; for(int k=threadIdx.x;k<c;k+=blockDim.x){ grad_points[a*c+k] += grad_out[i*nsc + j*c + k]; } } } } // input: eigvectors(eigvs): (b,n,3,3), idx: (b,n) // output: out (b,n,3) __global__ void gather_eigvector_gpu(int b, int n, const float *eigvs, const int *idx, float *out) { int batch_index = blockIdx.x; eigvs += n*9*batch_index; idx += n*batch_index; out += n*3*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<n;j+=stride) { int ii = idx[j]; out[j*3] = eigvs[j*9+ii]; out[j*3+1] = eigvs[j*9+ii+3]; out[j*3+2] = eigvs[j*9+ii+6]; } } // input: grad_out: (b,n,3), idx: (b,n) // output: grad_eigvs (b,n,3, 3) __global__ void gather_eigvector_grad_gpu(int b, int n, const float *grad_out, const int *idx, float *grad_eigvs) { int batch_index = blockIdx.x; grad_eigvs += n*9*batch_index; idx += n*batch_index; grad_out += n*3*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<n;j+=stride) { int ii = idx[j]; atomicAdd(&grad_eigvs[j*9+ii], grad_out[j*3]); atomicAdd(&grad_eigvs[j*9+ii+3], grad_out[j*3+1]); atomicAdd(&grad_eigvs[j*9+ii+6], grad_out[j*3+2]); } } void gatherpointLauncher(int b,int n,int m,int channel, const float * inp,const int * idx,float * out){ gatherpointKernel<<<b,256>>>(b,n,m,channel, inp,idx,out); } void gatheridLauncher(int b,int n,int m,int channel, const int * inp,const int * idx,int * out){ gatheridKernel<<<b,256>>>(b,n,m,channel,inp,idx,out); } void scatteraddpointLauncher(int b,int n,int m,int channel, const float * out_g,const int * idx,float * inp_g){ 
scatteraddpointKernel<<<b,256>>>(b,n,m,channel,out_g,idx,inp_g); } void scatteraddidLauncher(int b,int n,int m,int channel, const int * out_g,const int * idx,int * inp_g){ scatteraddidKernel<<<b,256>>>(b,n,m,channel,out_g,idx,inp_g); } void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ //group_point_gpu<<<dim3(b,nsample,1),256>>>(b,n,c,m,nsample,points,idx,out); group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out); //cudaDeviceSynchronize(); } void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ group_point_grad_gpu<<<b,c>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //cudaDeviceSynchronize(); } void gatherEigvectorLauncher(int b, int n, const float *eigvs, const int *idx, float *out){ gather_eigvector_gpu<<<b,256>>>(b,n,eigvs,idx,out); //cudaDeviceSynchronize(); } void gatherEigvectorGradLauncher(int b, int n, const float *grad_out, const int *idx, float *grad_eigvs){ gather_eigvector_grad_gpu<<<b,256>>>(b,n,grad_out,idx,grad_eigvs); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // input: points (b,m,c), idx (b,n,3), weight (b,n,3) // output: out (b,n,c) __global__ void threeinterpolate_kernel(int b, int m, int c, int n, const float * __restrict__ points, const int * __restrict__ idx, const float * __restrict__ weight, float * out) { int batch_index = blockIdx.x; points += m*c*batch_index; idx += n*3*batch_index; weight += n*3*batch_index; out += n*c*batch_index; float w1,w2,w3; int i1,i2,i3; for (int i=threadIdx.x;i<n;i+=blockDim.x){ w1=weight[i*3]; w2=weight[i*3 + 1]; w3=weight[i*3 + 2]; i1=idx[i*3]; i2=idx[i*3 + 1]; i3=idx[i*3 + 2]; for (int j=0;j<c;++j) { out[i*c+j] = points[i1*c+j]*w1 + points[i2*c+j]*w2 + points[i3*c+j]*w3; } } } // input: grad_out (b,n,c), idx (b,n,3), weight (b,n,3) // output: grad_points (b,m,c) __global__ void threeinterpolate_grad_kernel(int b, int n, int c, int m, const float * __restrict__ grad_out, const int * __restrict__ idx, const float * __restrict__ weight, float * grad_points) { int batch_index = blockIdx.x; grad_points += m*c*batch_index; idx += n*3*batch_index; weight += n*3*batch_index; grad_out += n*c*batch_index; float w1,w2,w3; int i1,i2,i3; for (int i=0;i<n;i++){ w1=weight[i*3]; w2=weight[i*3 + 1]; w3=weight[i*3 + 2]; i1=idx[i*3]; i2=idx[i*3 + 1]; i3=idx[i*3 + 2]; for (int j=threadIdx.x;j<c;j+=blockDim.x) { grad_points[i1*c+j] += grad_out[i*c+j]*w1; grad_points[i2*c+j] += grad_out[i*c+j]*w2; grad_points[i3*c+j] += grad_out[i*c+j]*w3; } } } void threeinterpolateLauncher(int b, int m, int c, int n, const float *points, const int *idx, const float *weight, float *out) { threeinterpolate_kernel<<<b, 256>>>(b, m, c, n, points, idx, weight, out); } void threeinterpolategradLauncher(int b, int n, int c, int m, const float *grad_out, const int *idx, const float *weight, float *grad_points) { threeinterpolate_grad_kernel<<<b, c>>>(b, n, c, m, grad_out, idx, weight, grad_points); }
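// Illustrative host-side usage sketch (assumed, not part of the original file).
// The launchers above take raw device pointers, so the caller must size every buffer
// itself. Below is one hypothetical way to drive farthestpointsamplingLauncher; the
// buffer names are made up, the 32*n temp size follows the "require 32*n working
// space" note above, and error checking is omitted for brevity.
#include <cuda_runtime.h>
void fps_usage_example(int b, int n, int m,
                       const float *h_points, /* b*n*3 input points on the host */
                       const int *h_init,     /* b starting indices, one per batch */
                       int *h_idxs) {         /* b*m sampled indices, written back */
    float *d_points = NULL, *d_temp = NULL;
    int *d_init = NULL, *d_idxs = NULL;
    cudaMalloc(&d_points, sizeof(float) * b * n * 3);
    cudaMalloc(&d_temp, sizeof(float) * 32 * n);   // kernel is launched with 32 blocks
    cudaMalloc(&d_init, sizeof(int) * b);
    cudaMalloc(&d_idxs, sizeof(int) * b * m);
    cudaMemcpy(d_points, h_points, sizeof(float) * b * n * 3, cudaMemcpyHostToDevice);
    cudaMemcpy(d_init, h_init, sizeof(int) * b, cudaMemcpyHostToDevice);
    farthestpointsamplingLauncher(b, n, m, d_init, d_points, d_temp, d_idxs);  // defined above
    cudaMemcpy(h_idxs, d_idxs, sizeof(int) * b * m, cudaMemcpyDeviceToHost);
    cudaFree(d_points); cudaFree(d_temp); cudaFree(d_init); cudaFree(d_idxs);
}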
d0034fc0f205d90e6f432c04b042ec75ee5745f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Devraj Mehta //Sugarscape //Applies CUDA to ABM in Sugarscape //Using pinned memory //standard imports #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> //OpenGL imports #include <GL/glew.h> #include <GLUT/glut.h> //for Mac //preprocessor definitions #define R 50 //radius of mouse agent //agent model typedef struct { int x; int y; float sugar; float metabolism; int vision; } Agent; //variable declarations int sugar_g, agent_g; //the number of blocks needed for sugar and agent kernel int sugar_b, agent_b; //number of threads per block float *sugar_levels, *sugar_maximums; //flattened matrices on host float *sl_d, *sm_d; //matrices copied to device float fps; //frames per second: epoque char wtitle[256]; //title of glut window Agent *agent_list, *a_d; //list of agents in world on host and device int *agent_matrix, *am_d; //matrix of agent locations int freeze_flag; //flag to halt all updating long long int step; //counter for the number of iterations int num_steps; int W; //latteral resolution of world int H; //vertical resolution of world int N; //number of agents //variables used in gl double xmin,ymin,xmax,ymax; int w, h; //screen size //gaussian function to determine layout of sugar float gauss(int x, int y, int x0, int y0, int sx, int sy) { return expf(-0.5*(x-x0)*(x-x0)/sx/sx)*expf(-0.5*(y-y0)*(y-y0)/sy/sy); } //kernel to grow sugar patches at each time step __global__ void grow_sugar(float *s_levels, float *s_maximums) { int i = blockIdx.x * blockDim.x + threadIdx.x; //index of sugar cell float growth_rate = 0.1f; s_levels[i] += growth_rate; if(s_levels[i] > s_maximums[i]) s_levels[i] = s_maximums[i]; /* if(s_levels[i] > 0.5 && s_maximums[i] < 0.7) //making a ridge s_levels[i] = 0.5; if(s_levels[i] > 0.7 && s_maximums[i] < 0.9) s_levels[i] = 0.7;*/ } //kernel to updatae the agents' sugar levels __global__ void feed_agents(Agent *a_list, int *a_mat, float *s_levels, int width, int height) { //declare variables int k; //agent's index float p; //amount of sugar agent can eat //set index k = blockIdx.x * blockDim.x + threadIdx.x; //if the agent is alive (this is bad for cuda simd) if(a_list[k].sugar > 0.0) { //increment metabolism a_list[k].sugar -= a_list[k].metabolism; //check if agent survived if(a_list[k].sugar <= 0.0) { a_mat[width * a_list[k].x + a_list[k].y] = -1; } else { //if stil alive take sugar from current patch p = 1.0 - a_list[k].sugar; if(p > s_levels[width * a_list[k].x + a_list[k].y]) { a_list[k].sugar += s_levels[width * a_list[k].x + a_list[k].y]; s_levels[width * a_list[k].x + a_list[k].y] = 0.0; } else { s_levels[width * a_list[k].x + a_list[k].y] -= p; a_list[k].sugar = 1.0; } } } } //kernel to update the agents' location __global__ void move_agents(Agent *a_list, int *a_mat, float *s_levels, int width, int height) { //declare variables int i, j, k, x, y, f; //k: index of agent float b; //best sugar level seen int bx, by; //chosen location of best sugar level seen int v; //agent's vision k = blockIdx.x * blockDim.x + threadIdx.x; //if the agent is alive (this is kinda bad for cuda simt) if(a_list[k].sugar > 0.0) { f=1; x = a_list[k].x; y = a_list[k].y; v = a_list[k].vision; while(f) { f=0; b = s_levels[width*x+y]; //best known sugar level bx = x; by = y; for(i = -v; i <= v /*&& b <= s*/; i++) { if(i+x >=0 && i+x < width) { for(j = -v; j <= v /*&& b <= s*/; j++) { if(j+y < height && j+y >= 0 && a_mat[width*(i+x)+j+y] == -1) { //check valid & vacant 
if(s_levels[width*(i+x)+j+y] > b) { b = s_levels[width*(i+x)+j+y]; bx = i+x; by = j+y; } } } } } //move to location if(a_mat[width*bx+by] == k); //simply dont move else if(atomicExch(a_mat+width*bx+by, k) == -1) { //atomic test and set operation a_mat[width * a_list[k].x + a_list[k].y] = -1; a_list[k].x = bx; a_list[k].y = by; } else f=1; } } } //method to display world in opengl void display(void) { if(step==num_steps) { //exit program and release memory hipHostFree(sugar_levels); free(sugar_maximums); hipHostFree(agent_list); hipHostFree(agent_matrix); hipFree(sl_d); hipFree(sm_d); hipFree(a_d); hipFree(am_d); exit(0); } //check to see if permitted if(freeze_flag) return; //declare variables int x,y,z; long long int n_left=0; double a,b,c,d; hipError_t cet; //begin time fps = (float)clock()/CLOCKS_PER_SEC; //run instructions on device hipLaunchKernelGGL(( grow_sugar), dim3(sugar_g), dim3(sugar_b), 0, 0, sl_d, sm_d); //asynchronus, nonblocking //block until all threads finish hipDeviceSynchronize(); //run instructions on device hipLaunchKernelGGL(( feed_agents), dim3(agent_g), dim3(agent_b), 0, 0, a_d, am_d, sl_d, W, H); //asynchronus, nonblocking //block until all threads finish hipDeviceSynchronize(); //run instructions on device hipLaunchKernelGGL(( move_agents), dim3(agent_g), dim3(agent_b), 0, 0, a_d, am_d, sl_d, W, H); //asynchronus, nonblocking //block until all threads finish hipDeviceSynchronize(); //copy updated matrices from device to host hipMemcpy(sugar_levels, sl_d, W*H*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(agent_matrix, am_d, W*H*sizeof(int), hipMemcpyDeviceToHost); //hipMemcpy(agent_list, a_d, N*sizeof(Agent), hipMemcpyDeviceToHost); //display world glClear(GL_COLOR_BUFFER_BIT); glBegin(GL_POINTS); for(x=0; x<w; x++) { for(y=0; y<h; y++) { z = W * (W * x / w) + H * y /h; //coordinates translated for sugarscape a = sugar_levels[z]; d = 0.0; if(agent_matrix[z] != -1) { d = 1.0; n_left++; } glColor3f(0.0, a, d); b = xmax * x / w - xmin; //coordinates translated for gl c = ymax * y / h - ymin; glVertex2f(b,c); } } glEnd(); //scale counter if(W<w && H<h) n_left = (W*H)*n_left/(w*h); else if(W>w || H>h) n_left = N; //finish and display epoque fps = 1.0 / ((float)clock()/CLOCKS_PER_SEC - fps); printf("%f\n", 1.0f/ fps); sprintf(wtitle, "Sugarscape (GPU) %d x %d %lld agents %3.1f fps step #%lld",W,H,n_left,fps, step); glutSetWindowTitle(wtitle); //end rendering and display updated buffer contents glutSwapBuffers(); //increment counter step++; //check for errors in cuda cet = hipGetLastError(); if(cet != hipSuccess) printf("CUDA ERROR: %s\n", hipGetErrorString(cet)); } //sets all agents to initial states void reset_agents() { int j, k, m, n; //temp iter var for(k=0; k<W*H; k++) agent_matrix[k] = -1; for(k=0; k<N; k++) { j=1; while(j) { m = random() % H; n = random() % W; if(agent_matrix[ W * m + n] == -1) j=0; } agent_matrix[W * m + n] = k; agent_list[k].x = m; agent_list[k].y = n; agent_list[k].sugar = 1.0; agent_list[k].metabolism = 0.001 * (random() % 900) + 0.1; agent_list[k].vision = random() % 9 + 1; } hipMemcpy(a_d, agent_list, N*sizeof(Agent), hipMemcpyHostToDevice); hipMemcpy(am_d, agent_matrix, W*H*sizeof(int), hipMemcpyHostToDevice); } //zeros all sugar levels void reset_sugar() { int k; //temp iter var for(k=0;k<W*H;k++) sugar_levels[k] = 0; hipMemcpy(sl_d, sugar_levels, W*H*sizeof(float), hipMemcpyHostToDevice); } //method to register opengl key events void keyfunc(unsigned char key,int xscr,int yscr) { if(key=='q') { //exit program and release memory 
hipHostFree(sugar_levels); free(sugar_maximums); hipHostFree(agent_list); hipHostFree(agent_matrix); hipFree(sl_d); hipFree(sm_d); hipFree(a_d); hipFree(am_d); printf("\nq pressed; program exiting.\n"); exit(0); } else if(key=='r') { //reset all sugar levels to zero reset_agents(); reset_sugar(); } else if(key=='s') { //reset all sugar levels to zero reset_sugar(); } else if(key=='a') { //randomize and reset all agents reset_agents(); } else if(key=='p') { //(un)freeze all updating freeze_flag = freeze_flag ? 0 : 1; } } //method to register opengl mouse events void mouse(int button,int state,int xscr,int yscr) { int j, k; //temp iter vars if(button==GLUT_LEFT_BUTTON) { if(state==GLUT_DOWN) { //set clicked upon sugar level to zero for(j=W*xscr/w-R; j<W*xscr/w+R; j++) if(j<W && j>=0) for(k=H*(h-yscr)/h-R; k<=H*(h-yscr)/h+R; k++) if(k<H && k>=0) sugar_levels[W * j + k] = 0; hipMemcpy(sl_d, sugar_levels, W*H*sizeof(float), hipMemcpyHostToDevice); //glutPostRedisplay(); // callback } } else if(button==GLUT_RIGHT_BUTTON) { if(state==GLUT_DOWN) { //print this cell's properties printf("(%d, %d)\n", W*xscr/w, H*(h-yscr)/h); printf("\tpatch:\n\t\tsugar\t%f\n\t\tmax\t%f\n", sugar_levels[W * (W*xscr/w) + (H*(h-yscr)/h)], sugar_maximums[W * (W*xscr/w) + (H*(h-yscr)/h)]); if(agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]==-1) printf("\tagent:\n\t\tnone\n"); else { printf("\tagent:\n\t\tvision\t%d\n\t\tmetab\t%f\n\t\tsugar\t%f\n", agent_list[agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]].vision, agent_list[agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]].metabolism, agent_list[agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]].sugar); } printf("\tmatrix:\n\t\tindex\t%d\n\t\tvalue\t%d\n", W * (W*xscr/w) + (H*(h-yscr)/h), agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]); } } } //method to register opengl mouse movement events void move(int xscr, int yscr) { int j, k; //temp iter vars //set clicked upon sugar level to zero for(j=W*xscr/w-R; j<W*xscr/w+R; j++) if(j<W && j>=0) for(k=H*(h-yscr)/h-R; k<=H*(h-yscr)/h+R; k++) if(k<H && k>=0) sugar_levels[W * j + k] = 0; hipMemcpy(sl_d, sugar_levels, W*H*sizeof(float), hipMemcpyHostToDevice); } //method to handle the screen being resized void reshape(int wscr,int hscr) { w=wscr; h=hscr; glViewport(0,0,(GLsizei)w,(GLsizei)h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); xmin=ymin=0.0; xmax=ymax=1.0; if(w<=h) ymax=1.0*(GLfloat)h/(GLfloat)w; else xmax=1.0*(GLfloat)w/(GLfloat)h; gluOrtho2D(xmin,xmax,ymin,ymax); glMatrixMode(GL_MODELVIEW); } //main method int main(int argc, char* argv[]) { //fetching for input if(argc!=3) { printf("please input N then number of steps\n"); return 0; } N = atoi(argv[1]); W = N; H = N; w = N; h = N; N = N*N; num_steps = atoi(argv[2]); //declare variables int i, j, k, l; //temp iter vars hipDeviceProp_t dp; //properties for device int max_threads; //the maximum number of threads per block //set best device (the one with the most multiprocessors) hipGetDeviceCount(&i); k=0; l=0; for(j=0; j<i; j++) { hipGetDeviceProperties(&dp, j); if(dp.multiProcessorCount > l) { l = dp.multiProcessorCount; k = j; } } hipSetDevice(k); hipGetDeviceProperties(&dp, k); printf("Operating on %s\n", dp.name); //define variables max_threads = dp.maxThreadsPerBlock; //find the smallest x so that x*y=N, y<M, & x & y are both integers: perhaps there is a better way sugar_b = max_threads; while((W*H) % sugar_b != 0) sugar_b--; sugar_g = W*H/sugar_b; agent_b = max_threads; while(N % agent_b != 0) agent_b--; agent_g = N/agent_b; freeze_flag = 0; step = 0; //allocate matrices 
hipHostMalloc((void**)&sugar_levels, W*H*sizeof(float)); hipMalloc((void**)&sl_d, W*H*sizeof(float)); sugar_maximums = (float*)malloc( W*H*sizeof(float)); hipMalloc((void**)&sm_d, W*H*sizeof(float)); hipHostMalloc((void**)&agent_list, N*sizeof(Agent)); hipMalloc((void**)&a_d, N*sizeof(Agent)); hipHostMalloc((void**)&agent_matrix, W*H*sizeof(int)); hipMalloc((void**)&am_d, W*H*sizeof(int)); //initialize matrices on host memset(sugar_levels, 0, W*H*sizeof(float)); for(i=0;i<W;i++) for(j=0;j<H;j++) sugar_maximums[W*i+j] = gauss(i,j,W/4,H*3/4,W/5,H/5) + gauss(i,j,W*3/4,H/4,W/5,H/5); reset_sugar(); reset_agents(); //copy matrices to device hipMemcpy(sm_d, sugar_maximums, W*H*sizeof(float), hipMemcpyHostToDevice); //setup OpenGL glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); glutInitWindowSize(w,h); glutInitWindowPosition(0,0); glutCreateWindow("Sugarscape"); glClearColor(1.0,1.0,1.0,0.0); //gl callback functions glutDisplayFunc(display); glutIdleFunc(display); glutMouseFunc(mouse); glutMotionFunc(move); glutKeyboardFunc(keyfunc); glutReshapeFunc(reshape); //begin looping sugarscape glutMainLoop(); return 0; }
d0034fc0f205d90e6f432c04b042ec75ee5745f5.cu
//Devraj Mehta //Sugarscape //Applies CUDA to ABM in Sugarscape //Using pinned memory //standard imports #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> //OpenGL imports #include <GL/glew.h> #include <GLUT/glut.h> //for Mac //preprocessor definitions #define R 50 //radius of mouse agent //agent model typedef struct { int x; int y; float sugar; float metabolism; int vision; } Agent; //variable declarations int sugar_g, agent_g; //the number of blocks needed for sugar and agent kernel int sugar_b, agent_b; //number of threads per block float *sugar_levels, *sugar_maximums; //flattened matrices on host float *sl_d, *sm_d; //matrices copied to device float fps; //frames per second: epoque char wtitle[256]; //title of glut window Agent *agent_list, *a_d; //list of agents in world on host and device int *agent_matrix, *am_d; //matrix of agent locations int freeze_flag; //flag to halt all updating long long int step; //counter for the number of iterations int num_steps; int W; //latteral resolution of world int H; //vertical resolution of world int N; //number of agents //variables used in gl double xmin,ymin,xmax,ymax; int w, h; //screen size //gaussian function to determine layout of sugar float gauss(int x, int y, int x0, int y0, int sx, int sy) { return expf(-0.5*(x-x0)*(x-x0)/sx/sx)*expf(-0.5*(y-y0)*(y-y0)/sy/sy); } //kernel to grow sugar patches at each time step __global__ void grow_sugar(float *s_levels, float *s_maximums) { int i = blockIdx.x * blockDim.x + threadIdx.x; //index of sugar cell float growth_rate = 0.1f; s_levels[i] += growth_rate; if(s_levels[i] > s_maximums[i]) s_levels[i] = s_maximums[i]; /* if(s_levels[i] > 0.5 && s_maximums[i] < 0.7) //making a ridge s_levels[i] = 0.5; if(s_levels[i] > 0.7 && s_maximums[i] < 0.9) s_levels[i] = 0.7;*/ } //kernel to updatae the agents' sugar levels __global__ void feed_agents(Agent *a_list, int *a_mat, float *s_levels, int width, int height) { //declare variables int k; //agent's index float p; //amount of sugar agent can eat //set index k = blockIdx.x * blockDim.x + threadIdx.x; //if the agent is alive (this is bad for cuda simd) if(a_list[k].sugar > 0.0) { //increment metabolism a_list[k].sugar -= a_list[k].metabolism; //check if agent survived if(a_list[k].sugar <= 0.0) { a_mat[width * a_list[k].x + a_list[k].y] = -1; } else { //if stil alive take sugar from current patch p = 1.0 - a_list[k].sugar; if(p > s_levels[width * a_list[k].x + a_list[k].y]) { a_list[k].sugar += s_levels[width * a_list[k].x + a_list[k].y]; s_levels[width * a_list[k].x + a_list[k].y] = 0.0; } else { s_levels[width * a_list[k].x + a_list[k].y] -= p; a_list[k].sugar = 1.0; } } } } //kernel to update the agents' location __global__ void move_agents(Agent *a_list, int *a_mat, float *s_levels, int width, int height) { //declare variables int i, j, k, x, y, f; //k: index of agent float b; //best sugar level seen int bx, by; //chosen location of best sugar level seen int v; //agent's vision k = blockIdx.x * blockDim.x + threadIdx.x; //if the agent is alive (this is kinda bad for cuda simt) if(a_list[k].sugar > 0.0) { f=1; x = a_list[k].x; y = a_list[k].y; v = a_list[k].vision; while(f) { f=0; b = s_levels[width*x+y]; //best known sugar level bx = x; by = y; for(i = -v; i <= v /*&& b <= s*/; i++) { if(i+x >=0 && i+x < width) { for(j = -v; j <= v /*&& b <= s*/; j++) { if(j+y < height && j+y >= 0 && a_mat[width*(i+x)+j+y] == -1) { //check valid & vacant if(s_levels[width*(i+x)+j+y] > b) { b = s_levels[width*(i+x)+j+y]; bx = i+x; by = j+y; } } } 
} } //move to location if(a_mat[width*bx+by] == k); //simply dont move else if(atomicExch(a_mat+width*bx+by, k) == -1) { //atomic test and set operation a_mat[width * a_list[k].x + a_list[k].y] = -1; a_list[k].x = bx; a_list[k].y = by; } else f=1; } } } //method to display world in opengl void display(void) { if(step==num_steps) { //exit program and release memory cudaFreeHost(sugar_levels); free(sugar_maximums); cudaFreeHost(agent_list); cudaFreeHost(agent_matrix); cudaFree(sl_d); cudaFree(sm_d); cudaFree(a_d); cudaFree(am_d); exit(0); } //check to see if permitted if(freeze_flag) return; //declare variables int x,y,z; long long int n_left=0; double a,b,c,d; cudaError_t cet; //begin time fps = (float)clock()/CLOCKS_PER_SEC; //run instructions on device grow_sugar<<< sugar_g, sugar_b>>>(sl_d, sm_d); //asynchronus, nonblocking //block until all threads finish cudaThreadSynchronize(); //run instructions on device feed_agents<<< agent_g, agent_b>>>(a_d, am_d, sl_d, W, H); //asynchronus, nonblocking //block until all threads finish cudaThreadSynchronize(); //run instructions on device move_agents<<< agent_g, agent_b>>>(a_d, am_d, sl_d, W, H); //asynchronus, nonblocking //block until all threads finish cudaThreadSynchronize(); //copy updated matrices from device to host cudaMemcpy(sugar_levels, sl_d, W*H*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(agent_matrix, am_d, W*H*sizeof(int), cudaMemcpyDeviceToHost); //cudaMemcpy(agent_list, a_d, N*sizeof(Agent), cudaMemcpyDeviceToHost); //display world glClear(GL_COLOR_BUFFER_BIT); glBegin(GL_POINTS); for(x=0; x<w; x++) { for(y=0; y<h; y++) { z = W * (W * x / w) + H * y /h; //coordinates translated for sugarscape a = sugar_levels[z]; d = 0.0; if(agent_matrix[z] != -1) { d = 1.0; n_left++; } glColor3f(0.0, a, d); b = xmax * x / w - xmin; //coordinates translated for gl c = ymax * y / h - ymin; glVertex2f(b,c); } } glEnd(); //scale counter if(W<w && H<h) n_left = (W*H)*n_left/(w*h); else if(W>w || H>h) n_left = N; //finish and display epoque fps = 1.0 / ((float)clock()/CLOCKS_PER_SEC - fps); printf("%f\n", 1.0f/ fps); sprintf(wtitle, "Sugarscape (GPU) %d x %d %lld agents %3.1f fps step #%lld",W,H,n_left,fps, step); glutSetWindowTitle(wtitle); //end rendering and display updated buffer contents glutSwapBuffers(); //increment counter step++; //check for errors in cuda cet = cudaGetLastError(); if(cet != cudaSuccess) printf("CUDA ERROR: %s\n", cudaGetErrorString(cet)); } //sets all agents to initial states void reset_agents() { int j, k, m, n; //temp iter var for(k=0; k<W*H; k++) agent_matrix[k] = -1; for(k=0; k<N; k++) { j=1; while(j) { m = random() % H; n = random() % W; if(agent_matrix[ W * m + n] == -1) j=0; } agent_matrix[W * m + n] = k; agent_list[k].x = m; agent_list[k].y = n; agent_list[k].sugar = 1.0; agent_list[k].metabolism = 0.001 * (random() % 900) + 0.1; agent_list[k].vision = random() % 9 + 1; } cudaMemcpy(a_d, agent_list, N*sizeof(Agent), cudaMemcpyHostToDevice); cudaMemcpy(am_d, agent_matrix, W*H*sizeof(int), cudaMemcpyHostToDevice); } //zeros all sugar levels void reset_sugar() { int k; //temp iter var for(k=0;k<W*H;k++) sugar_levels[k] = 0; cudaMemcpy(sl_d, sugar_levels, W*H*sizeof(float), cudaMemcpyHostToDevice); } //method to register opengl key events void keyfunc(unsigned char key,int xscr,int yscr) { if(key=='q') { //exit program and release memory cudaFreeHost(sugar_levels); free(sugar_maximums); cudaFreeHost(agent_list); cudaFreeHost(agent_matrix); cudaFree(sl_d); cudaFree(sm_d); cudaFree(a_d); cudaFree(am_d); printf("\nq 
pressed; program exiting.\n"); exit(0); } else if(key=='r') { //reset all sugar levels to zero reset_agents(); reset_sugar(); } else if(key=='s') { //reset all sugar levels to zero reset_sugar(); } else if(key=='a') { //randomize and reset all agents reset_agents(); } else if(key=='p') { //(un)freeze all updating freeze_flag = freeze_flag ? 0 : 1; } } //method to register opengl mouse events void mouse(int button,int state,int xscr,int yscr) { int j, k; //temp iter vars if(button==GLUT_LEFT_BUTTON) { if(state==GLUT_DOWN) { //set clicked upon sugar level to zero for(j=W*xscr/w-R; j<W*xscr/w+R; j++) if(j<W && j>=0) for(k=H*(h-yscr)/h-R; k<=H*(h-yscr)/h+R; k++) if(k<H && k>=0) sugar_levels[W * j + k] = 0; cudaMemcpy(sl_d, sugar_levels, W*H*sizeof(float), cudaMemcpyHostToDevice); //glutPostRedisplay(); // callback } } else if(button==GLUT_RIGHT_BUTTON) { if(state==GLUT_DOWN) { //print this cell's properties printf("(%d, %d)\n", W*xscr/w, H*(h-yscr)/h); printf("\tpatch:\n\t\tsugar\t%f\n\t\tmax\t%f\n", sugar_levels[W * (W*xscr/w) + (H*(h-yscr)/h)], sugar_maximums[W * (W*xscr/w) + (H*(h-yscr)/h)]); if(agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]==-1) printf("\tagent:\n\t\tnone\n"); else { printf("\tagent:\n\t\tvision\t%d\n\t\tmetab\t%f\n\t\tsugar\t%f\n", agent_list[agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]].vision, agent_list[agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]].metabolism, agent_list[agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]].sugar); } printf("\tmatrix:\n\t\tindex\t%d\n\t\tvalue\t%d\n", W * (W*xscr/w) + (H*(h-yscr)/h), agent_matrix[W * (W*xscr/w) + (H*(h-yscr)/h)]); } } } //method to register opengl mouse movement events void move(int xscr, int yscr) { int j, k; //temp iter vars //set clicked upon sugar level to zero for(j=W*xscr/w-R; j<W*xscr/w+R; j++) if(j<W && j>=0) for(k=H*(h-yscr)/h-R; k<=H*(h-yscr)/h+R; k++) if(k<H && k>=0) sugar_levels[W * j + k] = 0; cudaMemcpy(sl_d, sugar_levels, W*H*sizeof(float), cudaMemcpyHostToDevice); } //method to handle the screen being resized void reshape(int wscr,int hscr) { w=wscr; h=hscr; glViewport(0,0,(GLsizei)w,(GLsizei)h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); xmin=ymin=0.0; xmax=ymax=1.0; if(w<=h) ymax=1.0*(GLfloat)h/(GLfloat)w; else xmax=1.0*(GLfloat)w/(GLfloat)h; gluOrtho2D(xmin,xmax,ymin,ymax); glMatrixMode(GL_MODELVIEW); } //main method int main(int argc, char* argv[]) { //fetching for input if(argc!=3) { printf("please input N then number of steps\n"); return 0; } N = atoi(argv[1]); W = N; H = N; w = N; h = N; N = N*N; num_steps = atoi(argv[2]); //declare variables int i, j, k, l; //temp iter vars cudaDeviceProp dp; //properties for device int max_threads; //the maximum number of threads per block //set best device (the one with the most multiprocessors) cudaGetDeviceCount(&i); k=0; l=0; for(j=0; j<i; j++) { cudaGetDeviceProperties(&dp, j); if(dp.multiProcessorCount > l) { l = dp.multiProcessorCount; k = j; } } cudaSetDevice(k); cudaGetDeviceProperties(&dp, k); printf("Operating on %s\n", dp.name); //define variables max_threads = dp.maxThreadsPerBlock; //find the smallest x so that x*y=N, y<M, & x & y are both integers: perhaps there is a better way sugar_b = max_threads; while((W*H) % sugar_b != 0) sugar_b--; sugar_g = W*H/sugar_b; agent_b = max_threads; while(N % agent_b != 0) agent_b--; agent_g = N/agent_b; freeze_flag = 0; step = 0; //allocate matrices cudaMallocHost((void**)&sugar_levels, W*H*sizeof(float)); cudaMalloc((void**)&sl_d, W*H*sizeof(float)); sugar_maximums = (float*)malloc( W*H*sizeof(float)); 
cudaMalloc((void**)&sm_d, W*H*sizeof(float)); cudaMallocHost((void**)&agent_list, N*sizeof(Agent)); cudaMalloc((void**)&a_d, N*sizeof(Agent)); cudaMallocHost((void**)&agent_matrix, W*H*sizeof(int)); cudaMalloc((void**)&am_d, W*H*sizeof(int)); //initialize matrices on host memset(sugar_levels, 0, W*H*sizeof(float)); for(i=0;i<W;i++) for(j=0;j<H;j++) sugar_maximums[W*i+j] = gauss(i,j,W/4,H*3/4,W/5,H/5) + gauss(i,j,W*3/4,H/4,W/5,H/5); reset_sugar(); reset_agents(); //copy matrices to device cudaMemcpy(sm_d, sugar_maximums, W*H*sizeof(float), cudaMemcpyHostToDevice); //setup OpenGL glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); glutInitWindowSize(w,h); glutInitWindowPosition(0,0); glutCreateWindow("Sugarscape"); glClearColor(1.0,1.0,1.0,0.0); //gl callback functions glutDisplayFunc(display); glutIdleFunc(display); glutMouseFunc(mouse); glutMotionFunc(move); glutKeyboardFunc(keyfunc); glutReshapeFunc(reshape); //begin looping sugarscape glutMainLoop(); return 0; }
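// Assumed alternative launch configuration (a sketch, not part of the original program).
// main() above shrinks the block size until it divides W*H exactly; the source comment
// itself notes "perhaps there is a better way". The usual pattern is a ceiling-divided
// grid plus an in-kernel bounds check; grow_sugar_guarded and the extra `total`
// parameter below are hypothetical names used only for illustration.
__global__ void grow_sugar_guarded(float *s_levels, const float *s_maximums, int total) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= total) return;                                  // guard threads past the last cell
    s_levels[i] = fminf(s_levels[i] + 0.1f, s_maximums[i]);  // same 0.1 growth rate, clamped to the cell maximum
}
// Host side: any block size up to maxThreadsPerBlock works, no divisor search needed.
//   int threads = 256;
//   int blocks  = (W * H + threads - 1) / threads;
//   grow_sugar_guarded<<<blocks, threads>>>(sl_d, sm_d, W * H);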
1e23f04fd846648923d3edce7e50d2915dc12518.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <random>
#include <vector>
#include <chrono>
#include <deque>
#include <algorithm>
#include <iterator>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

#define BLOCK_SIZE 1024

__global__ void cnt_reduce(int *arr, const int n, const int m) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < m) {
        int j = n-i-1;
        if (i != j) {
            arr[i] = arr[i] + arr[j];
        }
    }
}

int count_size(int *cnt_arr, int n) {
    int m = (n+1)/2;
    while (n > 1) {
        hipLaunchKernelGGL(( cnt_reduce), dim3((m + BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, cnt_arr, n, m);
        n = m;
        m = (n+1)/2;
    }
    hipDeviceSynchronize();
    return cnt_arr[0];
}

void random_vector(int *arr, const int n, const int min_val=0.0, const int max_val=1000.0) {
    static std::random_device rd;
    static std::mt19937 mte(rd());
    std::uniform_int_distribution<int> dist(min_val, max_val);
    for (int i = 0; i < n; i++) {
        arr[i] = dist(mte);
    }
}

bool check_correctness(int *arr, int pred, int n) {
    int cnt = 0;
    for (int i = 0; i < n; i++) {
        cnt += arr[i];
    }
    return pred == cnt;
}

int main(void) {
    int n = 1 << 30;
    int *arr, *temp;
    hipMallocManaged(&arr, n*sizeof(int));
    random_vector(arr, n, 0, 1);
    temp = new int[n];
    std::copy(arr, arr+n, temp);

    auto t1 = std::chrono::high_resolution_clock::now();
    int cnt = count_size(arr, n);
    auto t2 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
    std::cout << duration << std::endl;

    t1 = std::chrono::high_resolution_clock::now();
    std::cout << check_correctness(temp, cnt, n) << std::endl;
    t2 = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
    std::cout << duration << std::endl;

    hipFree(arr);
    return 0;
}
1e23f04fd846648923d3edce7e50d2915dc12518.cu
#include <iostream>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <random>
#include <vector>
#include <chrono>
#include <deque>
#include <algorithm>
#include <iterator>
#include <curand.h>
#include <curand_kernel.h>

#define BLOCK_SIZE 1024

__global__ void cnt_reduce(int *arr, const int n, const int m) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < m) {
        int j = n-i-1;
        if (i != j) {
            arr[i] = arr[i] + arr[j];
        }
    }
}

int count_size(int *cnt_arr, int n) {
    int m = (n+1)/2;
    while (n > 1) {
        cnt_reduce<<<(m + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE>>>(cnt_arr, n, m);
        n = m;
        m = (n+1)/2;
    }
    cudaDeviceSynchronize();
    return cnt_arr[0];
}

void random_vector(int *arr, const int n, const int min_val=0.0, const int max_val=1000.0) {
    static std::random_device rd;
    static std::mt19937 mte(rd());
    std::uniform_int_distribution<int> dist(min_val, max_val);
    for (int i = 0; i < n; i++) {
        arr[i] = dist(mte);
    }
}

bool check_correctness(int *arr, int pred, int n) {
    int cnt = 0;
    for (int i = 0; i < n; i++) {
        cnt += arr[i];
    }
    return pred == cnt;
}

int main(void) {
    int n = 1 << 30;
    int *arr, *temp;
    cudaMallocManaged(&arr, n*sizeof(int));
    random_vector(arr, n, 0, 1);
    temp = new int[n];
    std::copy(arr, arr+n, temp);

    auto t1 = std::chrono::high_resolution_clock::now();
    int cnt = count_size(arr, n);
    auto t2 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
    std::cout << duration << std::endl;

    t1 = std::chrono::high_resolution_clock::now();
    std::cout << check_correctness(temp, cnt, n) << std::endl;
    t2 = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
    std::cout << duration << std::endl;

    cudaFree(arr);
    return 0;
}
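// CPU reference for the reduction above (an illustrative addition, not part of the
// original file). cnt_reduce folds element i together with element n-1-i and keeps the
// first m = (n+1)/2 entries each pass; with an odd length the middle element is left
// untouched for that pass. A worked trace with n = 5, arr = {1, 0, 1, 1, 0}:
//   pass 1: m = 3, pairs (0,4) and (1,3)  -> {1, 1, 1}
//   pass 2: m = 2, pair  (0,2)            -> {2, 1}
//   pass 3: m = 1, pair  (0,1)            -> {3}   (three ones in the input)
// The same folding done serially, usable as a correctness reference:
int count_size_cpu(int *arr, int n) {
    while (n > 1) {
        int m = (n + 1) / 2;
        for (int i = 0; i < m; i++) {
            int j = n - i - 1;
            if (i != j) arr[i] += arr[j];
        }
        n = m;
    }
    return arr[0];
}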
57f85b3944177ee8913b907c56d5192eab465804.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define STORE_DERIVATIVE_1(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (deriv##INDEX##_1*0x100000000))); #define STORE_DERIVATIVE_2(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].deriv##INDEX*0x100000000))); typedef struct { real4 posq; real3 force; ATOM_PARAMETER_DATA #ifdef NEED_PADDING float padding; #endif } AtomData; /** * Compute a force based on pair interactions. */ extern "C" __global__ void computeN2Energy(unsigned long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq, const unsigned int* __restrict__ exclusions, const ushort2* __restrict__ exclusionTiles, #ifdef USE_CUTOFF const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize, unsigned int maxTiles, const real4* __restrict__ blockCenter, const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms #else unsigned int numTiles #endif PARAMETER_ARGUMENTS) { const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); const unsigned int tbx = threadIdx.x - tgx; real energy = 0; __shared__ AtomData localData[THREAD_BLOCK_SIZE]; // First loop: process tiles that contain exclusions. const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) { const ushort2 tileIndices = exclusionTiles[pos]; const unsigned int x = tileIndices.x; const unsigned int y = tileIndices.y; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES unsigned int atom1 = x*TILE_SIZE + tgx; real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_EXCLUSIONS unsigned int excl = exclusions[pos*TILE_SIZE+tgx]; #endif if (x == y) { // This tile is on the diagonal. const unsigned int localAtomIndex = threadIdx.x; localData[localAtomIndex].posq = posq1; LOAD_LOCAL_PARAMETERS_FROM_1 for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+j; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+j; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && atom1 != atom2) { COMPUTE_INTERACTION dEdR /= -r; } energy += 0.5f*tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif } } else { // This is an off-diagonal tile. 
const unsigned int localAtomIndex = threadIdx.x; unsigned int j = y*TILE_SIZE + tgx; localData[localAtomIndex].posq = posq[j]; LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES #ifdef USE_EXCLUSIONS excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx)); #endif unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+tj; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); STORE_DERIVATIVES_1 if (x != y) { offset = y*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); STORE_DERIVATIVES_2 } } // Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all // of them (no cutoff). #ifdef USE_CUTOFF unsigned int numTiles = interactionCount[0]; int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); #else int pos = (int) (warp*(long long)numTiles/totalWarps); int end = (int) ((warp+1)*(long long)numTiles/totalWarps); #endif int skipBase = 0; int currentSkipIndex = tbx; __shared__ int atomIndices[THREAD_BLOCK_SIZE]; __shared__ volatile int skipTiles[THREAD_BLOCK_SIZE]; skipTiles[threadIdx.x] = -1; while (pos < end) { const bool isExcluded = false; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES bool includeTile = true; // Extract the coordinates of this tile. 
int x, y; bool singlePeriodicCopy = false; #ifdef USE_CUTOFF if (numTiles <= maxTiles) { x = tiles[pos]; real4 blockSizeX = blockSize[x]; singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF && 0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF && 0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF); } else #endif { y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } // Skip over tiles that have exclusions, since they were already processed. while (skipTiles[tbx+TILE_SIZE-1] < pos) { if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) { ushort2 tile = exclusionTiles[skipBase+tgx]; skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2; } else skipTiles[threadIdx.x] = end; skipBase += TILE_SIZE; currentSkipIndex = tbx; } while (skipTiles[currentSkipIndex] < pos) currentSkipIndex++; includeTile = (skipTiles[currentSkipIndex] != pos); } if (includeTile) { unsigned int atom1 = x*TILE_SIZE + tgx; // Load atom data for this tile. real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS const unsigned int localAtomIndex = threadIdx.x; #ifdef USE_CUTOFF unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx); #else unsigned int j = y*TILE_SIZE + tgx; #endif atomIndices[threadIdx.x] = j; if (j < PADDED_NUM_ATOMS) { localData[localAtomIndex].posq = posq[j]; LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES } #ifdef USE_PERIODIC if (singlePeriodicCopy) { // The box is small enough that we can just translate all the atoms into a single periodic // box, then skip having to apply periodic boundary conditions later. real4 blockCenterX = blockCenter[x]; posq1.x -= floor((posq1.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; posq1.y -= floor((posq1.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; posq1.z -= floor((posq1.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; localData[threadIdx.x].posq.x -= floor((localData[threadIdx.x].posq.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; localData[threadIdx.x].posq.y -= floor((localData[threadIdx.x].posq.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; localData[threadIdx.x].posq.z -= floor((localData[threadIdx.x].posq.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } else #endif { // We need to apply periodic boundary conditions separately for each interaction. 
unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); unsigned int offset = atom1; STORE_DERIVATIVES_1 #ifdef USE_CUTOFF unsigned int atom2 = atomIndices[threadIdx.x]; #else unsigned int atom2 = y*TILE_SIZE + tgx; #endif if (atom2 < PADDED_NUM_ATOMS) { atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); offset = atom2; STORE_DERIVATIVES_2 } } pos++; } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy; }
57f85b3944177ee8913b907c56d5192eab465804.cu
#define STORE_DERIVATIVE_1(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (deriv##INDEX##_1*0x100000000))); #define STORE_DERIVATIVE_2(INDEX) atomicAdd(&derivBuffers[offset+(INDEX-1)*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].deriv##INDEX*0x100000000))); typedef struct { real4 posq; real3 force; ATOM_PARAMETER_DATA #ifdef NEED_PADDING float padding; #endif } AtomData; /** * Compute a force based on pair interactions. */ extern "C" __global__ void computeN2Energy(unsigned long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq, const unsigned int* __restrict__ exclusions, const ushort2* __restrict__ exclusionTiles, #ifdef USE_CUTOFF const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize, unsigned int maxTiles, const real4* __restrict__ blockCenter, const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms #else unsigned int numTiles #endif PARAMETER_ARGUMENTS) { const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); const unsigned int tbx = threadIdx.x - tgx; real energy = 0; __shared__ AtomData localData[THREAD_BLOCK_SIZE]; // First loop: process tiles that contain exclusions. const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) { const ushort2 tileIndices = exclusionTiles[pos]; const unsigned int x = tileIndices.x; const unsigned int y = tileIndices.y; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES unsigned int atom1 = x*TILE_SIZE + tgx; real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_EXCLUSIONS unsigned int excl = exclusions[pos*TILE_SIZE+tgx]; #endif if (x == y) { // This tile is on the diagonal. const unsigned int localAtomIndex = threadIdx.x; localData[localAtomIndex].posq = posq1; LOAD_LOCAL_PARAMETERS_FROM_1 for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+j; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+j; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && atom1 != atom2) { COMPUTE_INTERACTION dEdR /= -r; } energy += 0.5f*tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif } } else { // This is an off-diagonal tile. 
const unsigned int localAtomIndex = threadIdx.x; unsigned int j = y*TILE_SIZE + tgx; localData[localAtomIndex].posq = posq[j]; LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES #ifdef USE_EXCLUSIONS excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx)); #endif unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+tj; real dEdR = 0; real tempEnergy = 0; #ifdef USE_EXCLUSIONS bool isExcluded = !(excl & 0x1); #endif if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); STORE_DERIVATIVES_1 if (x != y) { offset = y*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); STORE_DERIVATIVES_2 } } // Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all // of them (no cutoff). #ifdef USE_CUTOFF unsigned int numTiles = interactionCount[0]; int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps); #else int pos = (int) (warp*(long long)numTiles/totalWarps); int end = (int) ((warp+1)*(long long)numTiles/totalWarps); #endif int skipBase = 0; int currentSkipIndex = tbx; __shared__ int atomIndices[THREAD_BLOCK_SIZE]; __shared__ volatile int skipTiles[THREAD_BLOCK_SIZE]; skipTiles[threadIdx.x] = -1; while (pos < end) { const bool isExcluded = false; real3 force = make_real3(0); DECLARE_ATOM1_DERIVATIVES bool includeTile = true; // Extract the coordinates of this tile. 
int x, y; bool singlePeriodicCopy = false; #ifdef USE_CUTOFF if (numTiles <= maxTiles) { x = tiles[pos]; real4 blockSizeX = blockSize[x]; singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF && 0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF && 0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF); } else #endif { y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } // Skip over tiles that have exclusions, since they were already processed. while (skipTiles[tbx+TILE_SIZE-1] < pos) { if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) { ushort2 tile = exclusionTiles[skipBase+tgx]; skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2; } else skipTiles[threadIdx.x] = end; skipBase += TILE_SIZE; currentSkipIndex = tbx; } while (skipTiles[currentSkipIndex] < pos) currentSkipIndex++; includeTile = (skipTiles[currentSkipIndex] != pos); } if (includeTile) { unsigned int atom1 = x*TILE_SIZE + tgx; // Load atom data for this tile. real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS const unsigned int localAtomIndex = threadIdx.x; #ifdef USE_CUTOFF unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx); #else unsigned int j = y*TILE_SIZE + tgx; #endif atomIndices[threadIdx.x] = j; if (j < PADDED_NUM_ATOMS) { localData[localAtomIndex].posq = posq[j]; LOAD_LOCAL_PARAMETERS_FROM_GLOBAL localData[localAtomIndex].force = make_real3(0); CLEAR_LOCAL_DERIVATIVES } #ifdef USE_PERIODIC if (singlePeriodicCopy) { // The box is small enough that we can just translate all the atoms into a single periodic // box, then skip having to apply periodic boundary conditions later. real4 blockCenterX = blockCenter[x]; posq1.x -= floor((posq1.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; posq1.y -= floor((posq1.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; posq1.z -= floor((posq1.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; localData[threadIdx.x].posq.x -= floor((localData[threadIdx.x].posq.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; localData[threadIdx.x].posq.y -= floor((localData[threadIdx.x].posq.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; localData[threadIdx.x].posq.z -= floor((localData[threadIdx.x].posq.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } else #endif { // We need to apply periodic boundary conditions separately for each interaction. 
unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = localData[atom2].posq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x; delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y; delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z; #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; #ifdef USE_CUTOFF if (r2 < CUTOFF_SQUARED) { #endif real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; real dEdR = 0; real tempEnergy = 0; if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) { COMPUTE_INTERACTION dEdR /= -r; } energy += tempEnergy; delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; atom2 = tbx+tj; localData[atom2].force.x += delta.x; localData[atom2].force.y += delta.y; localData[atom2].force.z += delta.z; RECORD_DERIVATIVE_2 #ifdef USE_CUTOFF } #endif tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000))); atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000))); atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000))); unsigned int offset = atom1; STORE_DERIVATIVES_1 #ifdef USE_CUTOFF unsigned int atom2 = atomIndices[threadIdx.x]; #else unsigned int atom2 = y*TILE_SIZE + tgx; #endif if (atom2 < PADDED_NUM_ATOMS) { atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000))); atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000))); atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000))); offset = atom2; STORE_DERIVATIVES_2 } } pos++; } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy; }
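The kernel above commits its per-atom forces with atomicAdd on unsigned long long buffers after scaling each float component by 0x100000000, i.e. it accumulates in 64-bit fixed point so the total is exact to the chosen precision and independent of the order in which tiles land their contributions. Below is a minimal standalone sketch of that accumulation scheme only; the kernel, buffer, and helper names are mine and are not part of the file above.

#include <cuda_runtime.h>
#include <cstdio>

// Accumulate a float contribution into a 64-bit fixed-point slot, mirroring the
// forceBuffers updates above: scale by 2^32, truncate to long long, add atomically.
__device__ void addFixedPoint(unsigned long long* buf, int idx, float value) {
    atomicAdd(&buf[idx], static_cast<unsigned long long>((long long)(value * 0x100000000)));
}

__global__ void accumulate(unsigned long long* buf, const float* contributions, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        addFixedPoint(buf, 0, contributions[i]);  // every thread adds into one slot
}

int main() {
    const int n = 1024;
    float h[n];
    double reference = 0.0;
    for (int i = 0; i < n; ++i) {
        h[i] = 0.001f * (i - n / 2);  // mix of positive and negative contributions
        reference += h[i];
    }

    float* dContrib;
    unsigned long long* dBuf;
    cudaMalloc(&dContrib, n * sizeof(float));
    cudaMalloc(&dBuf, sizeof(unsigned long long));
    cudaMemcpy(dContrib, h, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(dBuf, 0, sizeof(unsigned long long));

    accumulate<<<(n + 127) / 128, 128>>>(dBuf, dContrib, n);

    unsigned long long raw;
    cudaMemcpy(&raw, dBuf, sizeof(raw), cudaMemcpyDeviceToHost);
    // Reinterpret as signed and scale back down, as a consumer of forceBuffers would.
    double total = (double)(long long)raw / (double)0x100000000;
    printf("fixed-point total = %f, float reference = %f\n", total, reference);

    cudaFree(dContrib);
    cudaFree(dBuf);
    return 0;
}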
49042f1aa04866836e6a9aaf9694ba0fa06b0e36.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/BinaryDistance.cuh>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>

namespace faiss {
namespace gpu {

BinaryFlatIndex::BinaryFlatIndex(GpuResources* res, int dim, MemorySpace space)
        : resources_(res),
          dim_(dim),
          num_(0),
          rawData_(
                  res,
                  makeSpaceAlloc(
                          AllocType::FlatData,
                          space,
                          res->getDefaultStreamCurrentDevice())) {
    FAISS_ASSERT(dim % 8 == 0);
}

/// Returns the number of vectors we contain
int BinaryFlatIndex::getSize() const {
    return vectors_.getSize(0);
}

int BinaryFlatIndex::getDim() const {
    return vectors_.getSize(1) * 8;
}

void BinaryFlatIndex::reserve(size_t numVecs, hipStream_t stream) {
    rawData_.reserve(numVecs * (dim_ / 8) * sizeof(unsigned int), stream);
}

Tensor<unsigned char, 2, true>& BinaryFlatIndex::getVectorsRef() {
    return vectors_;
}

void BinaryFlatIndex::query(
        Tensor<unsigned char, 2, true>& input,
        int k,
        Tensor<int, 2, true>& outDistances,
        Tensor<int, 2, true>& outIndices) {
    auto stream = resources_->getDefaultStreamCurrentDevice();

    runBinaryDistance(vectors_, input, outDistances, outIndices, k, stream);
}

void BinaryFlatIndex::add(
        const unsigned char* data,
        int numVecs,
        hipStream_t stream) {
    if (numVecs == 0) {
        return;
    }

    rawData_.append(
            (char*)data,
            (size_t)(dim_ / 8) * numVecs * sizeof(unsigned char),
            stream,
            true /* reserve exactly */);

    num_ += numVecs;

    DeviceTensor<unsigned char, 2, true> vectors(
            (unsigned char*)rawData_.data(), {(int)num_, (dim_ / 8)});
    vectors_ = std::move(vectors);
}

void BinaryFlatIndex::reset() {
    rawData_.clear();
    vectors_ = DeviceTensor<unsigned char, 2, true>();
    num_ = 0;
}

} // namespace gpu
} // namespace faiss
49042f1aa04866836e6a9aaf9694ba0fa06b0e36.cu
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/BinaryDistance.cuh>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>

namespace faiss {
namespace gpu {

BinaryFlatIndex::BinaryFlatIndex(GpuResources* res, int dim, MemorySpace space)
        : resources_(res),
          dim_(dim),
          num_(0),
          rawData_(
                  res,
                  makeSpaceAlloc(
                          AllocType::FlatData,
                          space,
                          res->getDefaultStreamCurrentDevice())) {
    FAISS_ASSERT(dim % 8 == 0);
}

/// Returns the number of vectors we contain
int BinaryFlatIndex::getSize() const {
    return vectors_.getSize(0);
}

int BinaryFlatIndex::getDim() const {
    return vectors_.getSize(1) * 8;
}

void BinaryFlatIndex::reserve(size_t numVecs, cudaStream_t stream) {
    rawData_.reserve(numVecs * (dim_ / 8) * sizeof(unsigned int), stream);
}

Tensor<unsigned char, 2, true>& BinaryFlatIndex::getVectorsRef() {
    return vectors_;
}

void BinaryFlatIndex::query(
        Tensor<unsigned char, 2, true>& input,
        int k,
        Tensor<int, 2, true>& outDistances,
        Tensor<int, 2, true>& outIndices) {
    auto stream = resources_->getDefaultStreamCurrentDevice();

    runBinaryDistance(vectors_, input, outDistances, outIndices, k, stream);
}

void BinaryFlatIndex::add(
        const unsigned char* data,
        int numVecs,
        cudaStream_t stream) {
    if (numVecs == 0) {
        return;
    }

    rawData_.append(
            (char*)data,
            (size_t)(dim_ / 8) * numVecs * sizeof(unsigned char),
            stream,
            true /* reserve exactly */);

    num_ += numVecs;

    DeviceTensor<unsigned char, 2, true> vectors(
            (unsigned char*)rawData_.data(), {(int)num_, (dim_ / 8)});
    vectors_ = std::move(vectors);
}

void BinaryFlatIndex::reset() {
    rawData_.clear();
    vectors_ = DeviceTensor<unsigned char, 2, true>();
    num_ = 0;
}

} // namespace gpu
} // namespace faiss
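The .hip/.cu pair above differs only in the hipify banner and the stream type (hipStream_t vs cudaStream_t); everything else survives translation verbatim. A hand-rolled portability shim can hide even that difference behind a single alias, as in the illustrative sketch below. The gpuStream_t / gpuStreamSynchronize names are my own assumptions, not part of faiss.

#ifdef __HIPCC__
#include <hip/hip_runtime.h>
using gpuStream_t = hipStream_t;
inline hipError_t gpuStreamSynchronize(gpuStream_t s) { return hipStreamSynchronize(s); }
#else
#include <cuda_runtime.h>
using gpuStream_t = cudaStream_t;
inline cudaError_t gpuStreamSynchronize(gpuStream_t s) { return cudaStreamSynchronize(s); }
#endif

// With such an alias, a method like BinaryFlatIndex::add(const unsigned char*, int, gpuStream_t)
// could compile unchanged under both hipcc and nvcc, leaving nothing for hipify to rewrite
// in the signature itself.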
fe148be7664ca56cf07fce4377eeee0025b5568f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <limits> #include <utility> #include "paddle/fluid/framework/gpu_utils.h" #include "paddle/fluid/operators/transpose_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using Dim3 = framework::Dim3; using Index3 = framework::Index3; struct EqualTo { constexpr bool operator()(int a, int b) const { return a == b; } }; struct GreaterThan { constexpr bool operator()(int a, int b) const { return a > b; } }; // Value can be decided in compile time. template <typename FUN, int INT_32 = 32> constexpr bool CheckProperTileSize(int tile_long, int tile_short, int size_T, FUN op) { return (size_T == 16 && ((tile_long == INT_32 && op(tile_short, 4)) || (tile_long == 2 * INT_32 && op(tile_short, 4)) || (tile_long == 4 * INT_32 && op(tile_short, 4)) || (tile_long == 8 * INT_32 && op(tile_short, 2)))) || (size_T == 8 && ((tile_long == INT_32 && op(tile_short, 15)) || (tile_long == 2 * INT_32 && op(tile_short, 15)) || (tile_long == 4 * INT_32 && op(tile_short, 8)) || (tile_long == 8 * INT_32 && op(tile_short, 4)) || (tile_long == 16 * INT_32 && op(tile_short, 2)))) || ((size_T == 4 || size_T == 2 || size_T == 1) && ((tile_long == INT_32 && op(tile_short, 15)) || (tile_long == 2 * INT_32 && op(tile_short, 15)) || (tile_long == 4 * INT_32 && op(tile_short, 8)) || (tile_long == 8 * INT_32 && op(tile_short, 4)) || (tile_long == 16 * INT_32 && op(tile_short, 2)) || (tile_long == 16 * INT_32 && op(tile_short, 2)))); } constexpr bool CheckLongTileSize(int tile_long, int tile_short, int size_T) { return CheckProperTileSize(tile_long, tile_short, size_T, EqualTo()); } constexpr bool CheckOutsideTileSize(int tile_long, int tile_short, int size_T) { return CheckProperTileSize(tile_long, tile_short, size_T, GreaterThan()); } constexpr bool CheckNonLongTileSize(int tile_long, int tile_short, int size_T) { return !CheckOutsideTileSize(tile_long, tile_short, size_T) && (CheckOutsideTileSize(tile_long * 2, tile_short, size_T) || CheckOutsideTileSize(tile_long, tile_short + 1, size_T)) && !CheckLongTileSize(tile_long, tile_short, size_T); } // Use SM to do data transfer, load a tile into SM then store out. // All tile read and write are colascing, so can speedup memory copy template <typename T, int NumThreads, int TileX, int TileY> __global__ void TilingSwapDim1And2(const T* __restrict__ input, Dim3 input_dims, T* __restrict__ output) { assert(blockDim.x == NumThreads); assert(blockDim.y == 1); assert(blockDim.z == 1); assert(gridDim.y == 1); assert(gridDim.z == 1); constexpr int BlockReadRows = NumThreads / TileY; constexpr int BlockWriteRows = NumThreads / TileX; // One extra line in the inner dimension to avoid share memory bank conflict. 
__shared__ __align__( alignof(T)) char share_mem_ptr[TileX * (TileY + 1) * sizeof(T)]; typedef T(*ShareMemory)[TileY + 1]; ShareMemory tile_sm = reinterpret_cast<ShareMemory>(share_mem_ptr); int x = threadIdx.x; Dim3 output_dims = { input_dims[0], input_dims[2], input_dims[1], }; // Align dim to Tiles Dim3 tile_aligned_input_dim = { input_dims[0], (input_dims[1] + TileX - 1) / TileX, (input_dims[2] + TileY - 1) / TileY, }; // Converts block idx to tile index, each block process a tile Index3 input_block_tile_index = ConvertTensorIndex(blockIdx.x, tile_aligned_input_dim); // Compute real index align to tile:0, 32, 64... Index3 block_tile_index_in_input = { input_block_tile_index[0], input_block_tile_index[1] * TileX, input_block_tile_index[2] * TileY, }; // Compute block flat index against input dims. int input_origin_block_flat_index = FlatTensorIndex(block_tile_index_in_input, input_dims); bool full_tile = true; int tile_width = TileY; // Last row is not full. if (input_block_tile_index[2] == tile_aligned_input_dim[2] - 1) { tile_width = input_dims[2] - (tile_aligned_input_dim[2] - 1) * TileY; full_tile &= false; } int tile_height = TileX; if (input_block_tile_index[1] == tile_aligned_input_dim[1] - 1) { tile_height = input_dims[1] - (tile_aligned_input_dim[1] - 1) * TileX; full_tile &= false; } constexpr int in_effective_thread_num = NumThreads / TileY * TileY; if (x < in_effective_thread_num) { // Read a tile from input using block. int x_i = x / TileY; int x_j = x % TileY; int input_ind = input_origin_block_flat_index + x_i * input_dims[2] + x_j; int input_inc = BlockReadRows * input_dims[2]; if (full_tile) { #pragma unroll for (int ind_i = x_i; ind_i < (TileX); ind_i += BlockReadRows) { tile_sm[ind_i][x_j] = input[input_ind]; input_ind += input_inc; } } else { if (x_j < tile_width) { #pragma unroll for (int ind_i = x_i; ind_i < (tile_height); ind_i += BlockReadRows) { tile_sm[ind_i][x_j] = input[input_ind]; input_ind += input_inc; } } } } __syncthreads(); // Store sm value back to out Index3 output_block_tile_index = { input_block_tile_index[0], input_block_tile_index[2], input_block_tile_index[1], }; Index3 block_tile_index_in_output = { output_block_tile_index[0], output_block_tile_index[1] * TileY, output_block_tile_index[2] * TileX, }; int output_origin_block_flat_index = FlatTensorIndex(block_tile_index_in_output, output_dims); constexpr int out_effective_thread_num = NumThreads / TileX * TileX; if (x < out_effective_thread_num) { int x_i = x / TileX; int x_j = x % TileX; int output_ind = output_origin_block_flat_index + x_i * output_dims[2] + x_j; int output_inc = BlockWriteRows * output_dims[2]; if (full_tile) { #pragma unroll for (int ind_i = x_i; ind_i < (TileY); ind_i += BlockWriteRows) { output[output_ind] = tile_sm[x_j][ind_i]; output_ind += output_inc; } } else { if (x_j < tile_height) { #pragma unroll for (int ind_i = x_i; ind_i < (tile_width); ind_i += BlockWriteRows) { output[output_ind] = tile_sm[x_j][ind_i]; output_ind += output_inc; } } } } } // This function will find combination of long_side X short_side in backups template <int TSIZE> bool SelectProperTileSize(std::vector<std::pair<int, int>>* tiles) { PADDLE_ENFORCE_LE( TSIZE, 16, platform::errors::InvalidArgument( "The tile size should smaller than 16, but received is:%d.", TSIZE)); PADDLE_ENFORCE_EQ( (TSIZE & (TSIZE - 1)), 0, platform::errors::InvalidArgument( "Data types should be powers of 2, but reived size is:%d.", TSIZE)); const int kMaxLongSideLen = 1024; const int kMaxShortSideLen = 15; for (int 
long_side = 32; long_side <= kMaxLongSideLen; long_side *= 2) { for (int short_side = 2; short_side <= kMaxShortSideLen; short_side += 1) { if (CheckLongTileSize(long_side, short_side, TSIZE)) { tiles->push_back(std::make_pair(long_side, short_side)); if (short_side == 2) return true; break; } } } return false; } // Use system built in type template <int ByteSize> struct SystemElemType; template <> struct SystemElemType<1> { using type = uint8_t; }; template <> struct SystemElemType<2> { using type = uint16_t; }; template <> struct SystemElemType<4> { using type = uint32_t; }; template <> struct SystemElemType<8> { using type = uint64_t; }; template <> struct SystemElemType<16> { using type = float4; }; template <typename T, int tile_long, int tile_short> void LaunchNarrowDims2TransposeKernel(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { constexpr int NumThreads = tile_long; if (tile_size_i <= tile_long && tile_size_j <= tile_short) { hipLaunchKernelGGL(( TilingSwapDim1And2< T, NumThreads, tile_long, tile_short>), dim3(total_tiles_count), dim3(NumThreads), 0, d.stream(), input, input_dims, output); } else { hipLaunchKernelGGL(( TilingSwapDim1And2< T, NumThreads, tile_short, tile_long>), dim3(total_tiles_count), dim3(NumThreads), 0, d.stream(), input, input_dims, output); } } template <typename T, int tile_long, int tile_short, typename dummy = void> struct NarrowDims2TransposeDispatch { static void DoTranspose(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { PADDLE_ENFORCE_EQ( (tile_long & (tile_long - 1)), 0, platform::errors::InvalidArgument( "The length of the longer side of the tile should be power of 2." " But received value is:%d.", tile_long)); bool request_satisfied = ::max(tile_size_i, tile_size_j) <= tile_long && ::min(tile_size_i, tile_size_j) <= tile_short; if (request_satisfied) { LaunchNarrowDims2TransposeKernel<T, tile_long, tile_short>( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); return; } const bool long_side_request_not_satisfied = ::max(tile_size_i, tile_size_j) > tile_long; if (long_side_request_not_satisfied) { NarrowDims2TransposeDispatch<T, tile_long * 2, tile_short>::DoTranspose( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } else { NarrowDims2TransposeDispatch<T, tile_long, tile_short + 1>::DoTranspose( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } } }; // If Not long tile size, goto this function when compile. template <typename T, int tile_long, int tile_short> struct NarrowDims2TransposeDispatch< T, tile_long, tile_short, typename std::enable_if< CheckNonLongTileSize(tile_long, tile_short, sizeof(T)), void>::type> { static void DoTranspose(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { PADDLE_ENFORCE_EQ( (tile_long & (tile_long - 1)), 0, platform::errors::InvalidArgument( "The length of the longer side of the tile should be power of 2." 
" But received value is:%d.", tile_long)); bool request_satisfied = ::max(tile_size_i, tile_size_j) <= tile_long && ::min(tile_size_i, tile_size_j) <= tile_short; if (request_satisfied) { LaunchNarrowDims2TransposeKernel<T, tile_long, tile_short>( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); return; } NarrowDims2TransposeDispatch<T, tile_long, tile_short + 1>::DoTranspose( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } }; // If long tile size, goto this function when compile. template <typename T, int tile_long, int tile_short> struct NarrowDims2TransposeDispatch< T, tile_long, tile_short, typename std::enable_if<CheckLongTileSize(tile_long, tile_short, sizeof(T)), void>::type> { static void DoTranspose(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { PADDLE_ENFORCE_EQ( (tile_long & (tile_long - 1)), 0, platform::errors::InvalidArgument( "The length of the longer side of the tile should be power of 2," " but received is:%d.", tile_long)); LaunchNarrowDims2TransposeKernel<T, tile_long, tile_short>( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } }; template <typename T, bool conjugate = false> void SwapDim1And2InNarrow(const platform::CUDADeviceContext& d, const T* input, const Dim3& input_dims, T* output, const int kMinTileSize) { // First get available tile sizes for the data type requested as backups std::vector<std::pair<int, int>> tile_sele; auto ret = SelectProperTileSize<sizeof(T)>(&tile_sele); PADDLE_ENFORCE_EQ( ret, true, platform::errors::InvalidArgument( "SelectProperTileSize should return true, but return value is:%d.", ret)); int tile_long_edge = 0; int tile_short_edge = 0; float lowest_cost = std::numeric_limits<float>::max(); int input_long_edge = ::max(input_dims[1], input_dims[2]); // Find the tile size that best suit in inputs. for (auto tile_size_pair : tile_sele) { int proposed_tile_long_edge = tile_size_pair.first; // data may not aligned to tile, so some threads wasted, we need // to find least wasted threads, which means we need to find tile // can split input properly, in another words: num_wasted_threads=0. int num_wasted_threads = input_long_edge - framework::CeilOrFloor<int, false>( input_long_edge, proposed_tile_long_edge) * proposed_tile_long_edge; int num_full_tiles = framework::CeilOrFloor<int, false>( input_long_edge, proposed_tile_long_edge); float cost = num_wasted_threads; if (cost <= lowest_cost) { tile_long_edge = proposed_tile_long_edge; tile_short_edge = tile_size_pair.second; lowest_cost = cost; } // break as we already find best tile size. if (cost == 0) break; } // The tile size we select should be match with input dim, long side to long // short side to short. // First set long side as i if dim1 > Tile min size, then set dim2 as j. int select_tile_size_i = input_dims[1] >= kMinTileSize ? tile_long_edge : input_dims[1]; int select_tile_size_j = input_dims[1] >= kMinTileSize ? input_dims[2] : tile_long_edge; // Check if i is long edge, if not set i as short. select_tile_size_i = select_tile_size_i == tile_long_edge ? tile_long_edge : ::min(select_tile_size_i, tile_short_edge); // Check if j is long edge, if not set j as short. select_tile_size_j = select_tile_size_j == tile_long_edge ? tile_long_edge : ::min(select_tile_size_j, tile_short_edge); // Here finally get proper long X short tile size. 
Dim3 input_dims_aligned = { input_dims[0], framework::CeilOrFloor<int, true>(input_dims[1], select_tile_size_i), framework::CeilOrFloor<int, true>(input_dims[2], select_tile_size_j), }; int total_tiles_count = input_dims_aligned[0] * input_dims_aligned[1] * input_dims_aligned[2]; // Suppose T can be replaced by system builtin types using ElemType = typename SystemElemType<sizeof(T)>::type; NarrowDims2TransposeDispatch<ElemType, 32, 2>::DoTranspose( d, select_tile_size_i, select_tile_size_j, total_tiles_count, reinterpret_cast<const ElemType*>(input), input_dims, reinterpret_cast<ElemType*>(output)); } // This is for case that cannot do coalescing read and write. // Or input is too small to split into tiles. template <typename T, int pos0, int pos1, int pos2> __global__ void TransposeSimpleKernel(int nthreads, const T* __restrict__ input, Dim3 input_dims, T* __restrict__ output) { Dim3 output_dims; output_dims[pos0] = input_dims[0]; output_dims[pos1] = input_dims[1]; output_dims[pos2] = input_dims[2]; CUDA_KERNEL_LOOP(output_index, nthreads) { Index3 output_tensor_index = ConvertTensorIndex(output_index, output_dims); Index3 input_tensor_index; input_tensor_index[0] = output_tensor_index[pos0]; input_tensor_index[1] = output_tensor_index[pos1]; input_tensor_index[2] = output_tensor_index[pos2]; int input_index = FlatTensorIndex(input_tensor_index, input_dims); output[output_index] = input[input_index]; } } // Here suppose convert all tensor to dim3, so just change dim1 and 2. template <typename T> void SendSwapDim1And2InTranspose(const platform::CUDADeviceContext& d, const T* input, const Dim3& input_dims, T* output) { // Suppose tile size > 16 static const int kMinTileSize = 16; static const int kMinNarrowTileSize = 96; bool large_tile = input_dims[1] >= kMinTileSize && input_dims[2] >= kMinTileSize; bool narrow_tile = input_dims[1] >= kMinNarrowTileSize || input_dims[2] >= kMinNarrowTileSize; if (large_tile) { // If input is large square, such as 32X32, use SM to do copy. // suppose 32 X 32 gives best performance, and 8 warp in block. constexpr int kTileSize = 32; constexpr int kNumThreads = 256; Dim3 input_dims_aligned = { input_dims[0], framework::CeilOrFloor<int, true>(input_dims[1], kTileSize), framework::CeilOrFloor<int, true>(input_dims[2], kTileSize), }; int total_tiles_count = input_dims_aligned[0] * input_dims_aligned[1] * input_dims_aligned[2]; hipLaunchKernelGGL(( TilingSwapDim1And2< T, kNumThreads, kTileSize, kTileSize>), dim3(total_tiles_count), dim3(kNumThreads), 0, d.stream(), input, input_dims, output); } else if (narrow_tile) { // If input shape is like Rect, such as 2X100, use Narrow tile size. // It makes things complicated, because need to find a tile can coverr // input and also reach best coalescing. 
SwapDim1And2InNarrow<T>(d, input, input_dims, output, kMinTileSize); } else { // If input shape is small, such as 8X8, just do simple copy int total_elements = input_dims[0] * input_dims[1] * input_dims[2]; auto config = GetGpuLaunchConfig1D(d, total_elements); hipLaunchKernelGGL(( TransposeSimpleKernel<T, 0, 2, 1>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, d.stream(), total_elements, input, input_dims, output); } } template <typename T> struct SwapDim1And2InTranspose { typedef platform::CUDADeviceContext Device; void operator()(const Device& d, const T* in, const std::vector<int>& combined_dims, T* out) { Dim3 input_dims = {static_cast<int>(combined_dims[0]), static_cast<int>(combined_dims[1]), static_cast<int>(combined_dims[2])}; SendSwapDim1And2InTranspose<T>(d, in, input_dims, out); } }; template <typename T> struct SwapDim0And2InTranspose { typedef platform::CUDADeviceContext Device; void operator()(const Device& d, const T* in, const std::vector<int>& combined_dims, T* out) { Dim3 input_dims = {static_cast<int>(combined_dims[0]), static_cast<int>(combined_dims[1]), static_cast<int>(combined_dims[2])}; size_t total_size = combined_dims[0] * combined_dims[1] * combined_dims[2]; auto config = GetGpuLaunchConfig1D(d, total_size); hipLaunchKernelGGL(( TransposeSimpleKernel<T, 2, 1, 0>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, d.stream(), total_size, in, input_dims, out); } }; // This function is to combine dimension. fox example: // (0, 1, 3, 2) --> (0, 2, 1) inline void CombineTransposeDim3(const framework::DDim& shape, const std::vector<int>& perm, std::vector<int>* new_perm, framework::DDim* new_dims) { PADDLE_ENFORCE_EQ(shape.size(), perm.size(), platform::errors::InvalidArgument( " shape should have the save dim with perm, but" " received shape size is:%d, perm size is:%d.", shape.size(), perm.size())); std::vector<int> dim_vec; if (shape.size() == 1) { // If input dimension is already 1, no need to combine dim. new_perm->resize(1); (*new_perm)[0] = perm[0]; dim_vec.push_back(shape[0]); *new_dims = framework::make_ddim(dim_vec); return; } std::vector<int> new_dim_pos(shape.size(), -1); std::vector<int> combined_dims(shape.size(), 0); int cur_head = perm[0]; new_dim_pos[cur_head] = 0; combined_dims[0] = shape[cur_head]; int dim_idx = 0; for (int perm_idx = 1; perm_idx < shape.size(); ++perm_idx) { // combine consecutive dimensions. if (cur_head + 1 == perm[perm_idx]) { cur_head = perm[perm_idx]; combined_dims[dim_idx] *= shape[cur_head]; } else { // Else start a new dimension. cur_head = perm[perm_idx]; dim_idx++; new_dim_pos[cur_head] = dim_idx; combined_dims[dim_idx] = shape[cur_head]; } } new_perm->resize(dim_idx + 1); dim_idx = 0; for (int i = 0; i < new_dim_pos.size(); ++i) { if (new_dim_pos[i] >= 0) { int new_perm_idx = new_dim_pos[i]; (*new_perm)[dim_idx] = new_perm_idx; dim_vec.push_back(combined_dims[new_perm_idx]); dim_idx++; } } *new_dims = framework::make_ddim(dim_vec); } template <typename T> struct TransposeSimple { static bool run(const platform::CUDADeviceContext& ctx, const Tensor& in, const std::vector<int32_t> perm, Tensor* out) { // First reduce the dimensions of the input tensor if possible. std::vector<int> new_perm; framework::DDim new_dims; CombineTransposeDim3(in.dims(), perm, &new_perm, &new_dims); // Only use tile copy GPU kernel when dimension is 2 or 3. 
int dims = new_dims.size(); std::vector<int> new_dim_vec = framework::vectorize<int>(new_dims); if (dims < 2 || dims > 3) return false; auto in_data = in.data<T>(); auto out_data = out->data<T>(); // In most cases, dim will not greater than 3 after combine. switch (dims) { case 2: if (new_perm[0] == 1 && new_perm[1] == 0) { // Add the first dimension size as 1. new_dim_vec.insert(new_dim_vec.begin(), 1); SwapDim1And2InTranspose<T>()(ctx, in_data, new_dim_vec, out_data); return true; } break; case 3: // In this case, suppose we can do coalescing read and write in tile. if (new_perm == std::vector<int>({0, 2, 1})) { SwapDim1And2InTranspose<T>()(ctx, in_data, new_dim_vec, out_data); return true; } else if (new_perm == std::vector<int>({2, 1, 0})) { // Maybe can optimize later, find a way to do coalescing memory copy. // But I think it depends on the data size. If span is not large, // maybe // can do coalescing. SwapDim0And2InTranspose<T>()(ctx, in_data, new_dim_vec, out_data); return true; } else { return false; } break; default: return false; } return false; } }; template <typename DeviceContext, typename T> class TransposeGPUKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.InputVar("X"); auto* out = context.OutputVar("Out"); const framework::Tensor* x_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*x); framework::Tensor* out_tensor = GetMutableLoDTensorOrSelectedRowsValueFromVar(out); out_tensor->mutable_data<T>(context.GetPlace()); if (out_tensor->numel() == 0) { return; } std::vector<int> axis = context.Attr<std::vector<int>>("axis"); int ndims = axis.size(); const auto& dev_ctx = context.template device_context<DeviceContext>(); auto ret = TransposeSimple<T>::run(dev_ctx, *x_tensor, axis, out_tensor); if (!ret) { TransCompute<DeviceContext, T>(ndims, dev_ctx, *x_tensor, out_tensor, axis); } } }; template <typename DeviceContext, typename T> class TransposeGradGPUKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* out_grad = context.InputVar(framework::GradVarName("Out")); auto* x_grad = context.OutputVar(framework::GradVarName("X")); if (!x_grad) { return; } const framework::Tensor* out_grad_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*out_grad); framework::Tensor* x_grad_tensor = GetMutableLoDTensorOrSelectedRowsValueFromVar(x_grad); x_grad_tensor->mutable_data<T>(context.GetPlace()); if (x_grad_tensor->numel() == 0) { return; } std::vector<int> axis = context.Attr<std::vector<int>>("axis"); std::vector<int> reversed_axis(axis); for (size_t i = 0; i < axis.size(); i++) { reversed_axis[axis[i]] = i; } int ndims = axis.size(); const auto& dev_ctx = context.template device_context<DeviceContext>(); auto ret = TransposeSimple<T>::run(dev_ctx, *out_grad_tensor, reversed_axis, x_grad_tensor); if (!ret) { TransCompute<DeviceContext, T>(ndims, dev_ctx, *out_grad_tensor, x_grad_tensor, reversed_axis); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( transpose, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, float>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( transpose_grad, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>, 
ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( transpose2, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int32_t>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, float>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( transpose2_grad, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int32_t>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>);
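In the .hip version that ends here, every kernel launch goes through the hipLaunchKernelGGL macro, while the .cu original that follows uses the triple-chevron syntax. The self-contained sketch below shows that correspondence with a placeholder kernel of my own, not one of the kernels from the file.

#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void scaleKernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

void launchScale(float* data, float factor, int n) {
    dim3 grid((n + 255) / 256), block(256);
#ifdef __HIPCC__
    // HIP spelling: hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...).
    // hipify wraps template kernel names in parentheses so their commas are not read as
    // macro argument separators, which is why the file above writes (( ... )).
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, data, factor, n);
#else
    // CUDA spelling: the same launch written with the triple-chevron syntax.
    scaleKernel<<<grid, block, 0, 0>>>(data, factor, n);
#endif
}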
fe148be7664ca56cf07fce4377eeee0025b5568f.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <limits> #include <utility> #include "paddle/fluid/framework/gpu_utils.h" #include "paddle/fluid/operators/transpose_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using Dim3 = framework::Dim3; using Index3 = framework::Index3; struct EqualTo { constexpr bool operator()(int a, int b) const { return a == b; } }; struct GreaterThan { constexpr bool operator()(int a, int b) const { return a > b; } }; // Value can be decided in compile time. template <typename FUN, int INT_32 = 32> constexpr bool CheckProperTileSize(int tile_long, int tile_short, int size_T, FUN op) { return (size_T == 16 && ((tile_long == INT_32 && op(tile_short, 4)) || (tile_long == 2 * INT_32 && op(tile_short, 4)) || (tile_long == 4 * INT_32 && op(tile_short, 4)) || (tile_long == 8 * INT_32 && op(tile_short, 2)))) || (size_T == 8 && ((tile_long == INT_32 && op(tile_short, 15)) || (tile_long == 2 * INT_32 && op(tile_short, 15)) || (tile_long == 4 * INT_32 && op(tile_short, 8)) || (tile_long == 8 * INT_32 && op(tile_short, 4)) || (tile_long == 16 * INT_32 && op(tile_short, 2)))) || ((size_T == 4 || size_T == 2 || size_T == 1) && ((tile_long == INT_32 && op(tile_short, 15)) || (tile_long == 2 * INT_32 && op(tile_short, 15)) || (tile_long == 4 * INT_32 && op(tile_short, 8)) || (tile_long == 8 * INT_32 && op(tile_short, 4)) || (tile_long == 16 * INT_32 && op(tile_short, 2)) || (tile_long == 16 * INT_32 && op(tile_short, 2)))); } constexpr bool CheckLongTileSize(int tile_long, int tile_short, int size_T) { return CheckProperTileSize(tile_long, tile_short, size_T, EqualTo()); } constexpr bool CheckOutsideTileSize(int tile_long, int tile_short, int size_T) { return CheckProperTileSize(tile_long, tile_short, size_T, GreaterThan()); } constexpr bool CheckNonLongTileSize(int tile_long, int tile_short, int size_T) { return !CheckOutsideTileSize(tile_long, tile_short, size_T) && (CheckOutsideTileSize(tile_long * 2, tile_short, size_T) || CheckOutsideTileSize(tile_long, tile_short + 1, size_T)) && !CheckLongTileSize(tile_long, tile_short, size_T); } // Use SM to do data transfer, load a tile into SM then store out. // All tile read and write are colascing, so can speedup memory copy template <typename T, int NumThreads, int TileX, int TileY> __global__ void TilingSwapDim1And2(const T* __restrict__ input, Dim3 input_dims, T* __restrict__ output) { assert(blockDim.x == NumThreads); assert(blockDim.y == 1); assert(blockDim.z == 1); assert(gridDim.y == 1); assert(gridDim.z == 1); constexpr int BlockReadRows = NumThreads / TileY; constexpr int BlockWriteRows = NumThreads / TileX; // One extra line in the inner dimension to avoid share memory bank conflict. 
__shared__ __align__( alignof(T)) char share_mem_ptr[TileX * (TileY + 1) * sizeof(T)]; typedef T(*ShareMemory)[TileY + 1]; ShareMemory tile_sm = reinterpret_cast<ShareMemory>(share_mem_ptr); int x = threadIdx.x; Dim3 output_dims = { input_dims[0], input_dims[2], input_dims[1], }; // Align dim to Tiles Dim3 tile_aligned_input_dim = { input_dims[0], (input_dims[1] + TileX - 1) / TileX, (input_dims[2] + TileY - 1) / TileY, }; // Converts block idx to tile index, each block process a tile Index3 input_block_tile_index = ConvertTensorIndex(blockIdx.x, tile_aligned_input_dim); // Compute real index align to tile:0, 32, 64... Index3 block_tile_index_in_input = { input_block_tile_index[0], input_block_tile_index[1] * TileX, input_block_tile_index[2] * TileY, }; // Compute block flat index against input dims. int input_origin_block_flat_index = FlatTensorIndex(block_tile_index_in_input, input_dims); bool full_tile = true; int tile_width = TileY; // Last row is not full. if (input_block_tile_index[2] == tile_aligned_input_dim[2] - 1) { tile_width = input_dims[2] - (tile_aligned_input_dim[2] - 1) * TileY; full_tile &= false; } int tile_height = TileX; if (input_block_tile_index[1] == tile_aligned_input_dim[1] - 1) { tile_height = input_dims[1] - (tile_aligned_input_dim[1] - 1) * TileX; full_tile &= false; } constexpr int in_effective_thread_num = NumThreads / TileY * TileY; if (x < in_effective_thread_num) { // Read a tile from input using block. int x_i = x / TileY; int x_j = x % TileY; int input_ind = input_origin_block_flat_index + x_i * input_dims[2] + x_j; int input_inc = BlockReadRows * input_dims[2]; if (full_tile) { #pragma unroll for (int ind_i = x_i; ind_i < (TileX); ind_i += BlockReadRows) { tile_sm[ind_i][x_j] = input[input_ind]; input_ind += input_inc; } } else { if (x_j < tile_width) { #pragma unroll for (int ind_i = x_i; ind_i < (tile_height); ind_i += BlockReadRows) { tile_sm[ind_i][x_j] = input[input_ind]; input_ind += input_inc; } } } } __syncthreads(); // Store sm value back to out Index3 output_block_tile_index = { input_block_tile_index[0], input_block_tile_index[2], input_block_tile_index[1], }; Index3 block_tile_index_in_output = { output_block_tile_index[0], output_block_tile_index[1] * TileY, output_block_tile_index[2] * TileX, }; int output_origin_block_flat_index = FlatTensorIndex(block_tile_index_in_output, output_dims); constexpr int out_effective_thread_num = NumThreads / TileX * TileX; if (x < out_effective_thread_num) { int x_i = x / TileX; int x_j = x % TileX; int output_ind = output_origin_block_flat_index + x_i * output_dims[2] + x_j; int output_inc = BlockWriteRows * output_dims[2]; if (full_tile) { #pragma unroll for (int ind_i = x_i; ind_i < (TileY); ind_i += BlockWriteRows) { output[output_ind] = tile_sm[x_j][ind_i]; output_ind += output_inc; } } else { if (x_j < tile_height) { #pragma unroll for (int ind_i = x_i; ind_i < (tile_width); ind_i += BlockWriteRows) { output[output_ind] = tile_sm[x_j][ind_i]; output_ind += output_inc; } } } } } // This function will find combination of long_side X short_side in backups template <int TSIZE> bool SelectProperTileSize(std::vector<std::pair<int, int>>* tiles) { PADDLE_ENFORCE_LE( TSIZE, 16, platform::errors::InvalidArgument( "The tile size should smaller than 16, but received is:%d.", TSIZE)); PADDLE_ENFORCE_EQ( (TSIZE & (TSIZE - 1)), 0, platform::errors::InvalidArgument( "Data types should be powers of 2, but reived size is:%d.", TSIZE)); const int kMaxLongSideLen = 1024; const int kMaxShortSideLen = 15; for (int 
long_side = 32; long_side <= kMaxLongSideLen; long_side *= 2) { for (int short_side = 2; short_side <= kMaxShortSideLen; short_side += 1) { if (CheckLongTileSize(long_side, short_side, TSIZE)) { tiles->push_back(std::make_pair(long_side, short_side)); if (short_side == 2) return true; break; } } } return false; } // Use system built in type template <int ByteSize> struct SystemElemType; template <> struct SystemElemType<1> { using type = uint8_t; }; template <> struct SystemElemType<2> { using type = uint16_t; }; template <> struct SystemElemType<4> { using type = uint32_t; }; template <> struct SystemElemType<8> { using type = uint64_t; }; template <> struct SystemElemType<16> { using type = float4; }; template <typename T, int tile_long, int tile_short> void LaunchNarrowDims2TransposeKernel(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { constexpr int NumThreads = tile_long; if (tile_size_i <= tile_long && tile_size_j <= tile_short) { TilingSwapDim1And2< T, NumThreads, tile_long, tile_short><<<total_tiles_count, NumThreads, 0, d.stream()>>>( input, input_dims, output); } else { TilingSwapDim1And2< T, NumThreads, tile_short, tile_long><<<total_tiles_count, NumThreads, 0, d.stream()>>>( input, input_dims, output); } } template <typename T, int tile_long, int tile_short, typename dummy = void> struct NarrowDims2TransposeDispatch { static void DoTranspose(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { PADDLE_ENFORCE_EQ( (tile_long & (tile_long - 1)), 0, platform::errors::InvalidArgument( "The length of the longer side of the tile should be power of 2." " But received value is:%d.", tile_long)); bool request_satisfied = std::max(tile_size_i, tile_size_j) <= tile_long && std::min(tile_size_i, tile_size_j) <= tile_short; if (request_satisfied) { LaunchNarrowDims2TransposeKernel<T, tile_long, tile_short>( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); return; } const bool long_side_request_not_satisfied = std::max(tile_size_i, tile_size_j) > tile_long; if (long_side_request_not_satisfied) { NarrowDims2TransposeDispatch<T, tile_long * 2, tile_short>::DoTranspose( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } else { NarrowDims2TransposeDispatch<T, tile_long, tile_short + 1>::DoTranspose( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } } }; // If Not long tile size, goto this function when compile. template <typename T, int tile_long, int tile_short> struct NarrowDims2TransposeDispatch< T, tile_long, tile_short, typename std::enable_if< CheckNonLongTileSize(tile_long, tile_short, sizeof(T)), void>::type> { static void DoTranspose(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { PADDLE_ENFORCE_EQ( (tile_long & (tile_long - 1)), 0, platform::errors::InvalidArgument( "The length of the longer side of the tile should be power of 2." 
" But received value is:%d.", tile_long)); bool request_satisfied = std::max(tile_size_i, tile_size_j) <= tile_long && std::min(tile_size_i, tile_size_j) <= tile_short; if (request_satisfied) { LaunchNarrowDims2TransposeKernel<T, tile_long, tile_short>( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); return; } NarrowDims2TransposeDispatch<T, tile_long, tile_short + 1>::DoTranspose( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } }; // If long tile size, goto this function when compile. template <typename T, int tile_long, int tile_short> struct NarrowDims2TransposeDispatch< T, tile_long, tile_short, typename std::enable_if<CheckLongTileSize(tile_long, tile_short, sizeof(T)), void>::type> { static void DoTranspose(const platform::CUDADeviceContext& d, int tile_size_i, int tile_size_j, int total_tiles_count, const T* input, const Dim3& input_dims, T* output) { PADDLE_ENFORCE_EQ( (tile_long & (tile_long - 1)), 0, platform::errors::InvalidArgument( "The length of the longer side of the tile should be power of 2," " but received is:%d.", tile_long)); LaunchNarrowDims2TransposeKernel<T, tile_long, tile_short>( d, tile_size_i, tile_size_j, total_tiles_count, input, input_dims, output); } }; template <typename T, bool conjugate = false> void SwapDim1And2InNarrow(const platform::CUDADeviceContext& d, const T* input, const Dim3& input_dims, T* output, const int kMinTileSize) { // First get available tile sizes for the data type requested as backups std::vector<std::pair<int, int>> tile_sele; auto ret = SelectProperTileSize<sizeof(T)>(&tile_sele); PADDLE_ENFORCE_EQ( ret, true, platform::errors::InvalidArgument( "SelectProperTileSize should return true, but return value is:%d.", ret)); int tile_long_edge = 0; int tile_short_edge = 0; float lowest_cost = std::numeric_limits<float>::max(); int input_long_edge = std::max(input_dims[1], input_dims[2]); // Find the tile size that best suit in inputs. for (auto tile_size_pair : tile_sele) { int proposed_tile_long_edge = tile_size_pair.first; // data may not aligned to tile, so some threads wasted, we need // to find least wasted threads, which means we need to find tile // can split input properly, in another words: num_wasted_threads=0. int num_wasted_threads = input_long_edge - framework::CeilOrFloor<int, false>( input_long_edge, proposed_tile_long_edge) * proposed_tile_long_edge; int num_full_tiles = framework::CeilOrFloor<int, false>( input_long_edge, proposed_tile_long_edge); float cost = num_wasted_threads; if (cost <= lowest_cost) { tile_long_edge = proposed_tile_long_edge; tile_short_edge = tile_size_pair.second; lowest_cost = cost; } // break as we already find best tile size. if (cost == 0) break; } // The tile size we select should be match with input dim, long side to long // short side to short. // First set long side as i if dim1 > Tile min size, then set dim2 as j. int select_tile_size_i = input_dims[1] >= kMinTileSize ? tile_long_edge : input_dims[1]; int select_tile_size_j = input_dims[1] >= kMinTileSize ? input_dims[2] : tile_long_edge; // Check if i is long edge, if not set i as short. select_tile_size_i = select_tile_size_i == tile_long_edge ? tile_long_edge : std::min(select_tile_size_i, tile_short_edge); // Check if j is long edge, if not set j as short. select_tile_size_j = select_tile_size_j == tile_long_edge ? tile_long_edge : std::min(select_tile_size_j, tile_short_edge); // Here finally get proper long X short tile size. 
Dim3 input_dims_aligned = { input_dims[0], framework::CeilOrFloor<int, true>(input_dims[1], select_tile_size_i), framework::CeilOrFloor<int, true>(input_dims[2], select_tile_size_j), }; int total_tiles_count = input_dims_aligned[0] * input_dims_aligned[1] * input_dims_aligned[2]; // Suppose T can be replaced by system builtin types using ElemType = typename SystemElemType<sizeof(T)>::type; NarrowDims2TransposeDispatch<ElemType, 32, 2>::DoTranspose( d, select_tile_size_i, select_tile_size_j, total_tiles_count, reinterpret_cast<const ElemType*>(input), input_dims, reinterpret_cast<ElemType*>(output)); } // This is for case that cannot do coalescing read and write. // Or input is too small to split into tiles. template <typename T, int pos0, int pos1, int pos2> __global__ void TransposeSimpleKernel(int nthreads, const T* __restrict__ input, Dim3 input_dims, T* __restrict__ output) { Dim3 output_dims; output_dims[pos0] = input_dims[0]; output_dims[pos1] = input_dims[1]; output_dims[pos2] = input_dims[2]; CUDA_KERNEL_LOOP(output_index, nthreads) { Index3 output_tensor_index = ConvertTensorIndex(output_index, output_dims); Index3 input_tensor_index; input_tensor_index[0] = output_tensor_index[pos0]; input_tensor_index[1] = output_tensor_index[pos1]; input_tensor_index[2] = output_tensor_index[pos2]; int input_index = FlatTensorIndex(input_tensor_index, input_dims); output[output_index] = input[input_index]; } } // Here suppose convert all tensor to dim3, so just change dim1 and 2. template <typename T> void SendSwapDim1And2InTranspose(const platform::CUDADeviceContext& d, const T* input, const Dim3& input_dims, T* output) { // Suppose tile size > 16 static const int kMinTileSize = 16; static const int kMinNarrowTileSize = 96; bool large_tile = input_dims[1] >= kMinTileSize && input_dims[2] >= kMinTileSize; bool narrow_tile = input_dims[1] >= kMinNarrowTileSize || input_dims[2] >= kMinNarrowTileSize; if (large_tile) { // If input is large square, such as 32X32, use SM to do copy. // suppose 32 X 32 gives best performance, and 8 warp in block. constexpr int kTileSize = 32; constexpr int kNumThreads = 256; Dim3 input_dims_aligned = { input_dims[0], framework::CeilOrFloor<int, true>(input_dims[1], kTileSize), framework::CeilOrFloor<int, true>(input_dims[2], kTileSize), }; int total_tiles_count = input_dims_aligned[0] * input_dims_aligned[1] * input_dims_aligned[2]; TilingSwapDim1And2< T, kNumThreads, kTileSize, kTileSize><<<total_tiles_count, kNumThreads, 0, d.stream()>>>( input, input_dims, output); } else if (narrow_tile) { // If input shape is like Rect, such as 2X100, use Narrow tile size. // It makes things complicated, because need to find a tile can coverr // input and also reach best coalescing. 
SwapDim1And2InNarrow<T>(d, input, input_dims, output, kMinTileSize); } else { // If input shape is small, such as 8X8, just do simple copy int total_elements = input_dims[0] * input_dims[1] * input_dims[2]; auto config = GetGpuLaunchConfig1D(d, total_elements); TransposeSimpleKernel<T, 0, 2, 1><<< config.block_per_grid.x, config.thread_per_block.x, 0, d.stream()>>>( total_elements, input, input_dims, output); } } template <typename T> struct SwapDim1And2InTranspose { typedef platform::CUDADeviceContext Device; void operator()(const Device& d, const T* in, const std::vector<int>& combined_dims, T* out) { Dim3 input_dims = {static_cast<int>(combined_dims[0]), static_cast<int>(combined_dims[1]), static_cast<int>(combined_dims[2])}; SendSwapDim1And2InTranspose<T>(d, in, input_dims, out); } }; template <typename T> struct SwapDim0And2InTranspose { typedef platform::CUDADeviceContext Device; void operator()(const Device& d, const T* in, const std::vector<int>& combined_dims, T* out) { Dim3 input_dims = {static_cast<int>(combined_dims[0]), static_cast<int>(combined_dims[1]), static_cast<int>(combined_dims[2])}; size_t total_size = combined_dims[0] * combined_dims[1] * combined_dims[2]; auto config = GetGpuLaunchConfig1D(d, total_size); TransposeSimpleKernel<T, 2, 1, 0><<< config.block_per_grid.x, config.thread_per_block.x, 0, d.stream()>>>( total_size, in, input_dims, out); } }; // This function is to combine dimension. fox example: // (0, 1, 3, 2) --> (0, 2, 1) inline void CombineTransposeDim3(const framework::DDim& shape, const std::vector<int>& perm, std::vector<int>* new_perm, framework::DDim* new_dims) { PADDLE_ENFORCE_EQ(shape.size(), perm.size(), platform::errors::InvalidArgument( " shape should have the save dim with perm, but" " received shape size is:%d, perm size is:%d.", shape.size(), perm.size())); std::vector<int> dim_vec; if (shape.size() == 1) { // If input dimension is already 1, no need to combine dim. new_perm->resize(1); (*new_perm)[0] = perm[0]; dim_vec.push_back(shape[0]); *new_dims = framework::make_ddim(dim_vec); return; } std::vector<int> new_dim_pos(shape.size(), -1); std::vector<int> combined_dims(shape.size(), 0); int cur_head = perm[0]; new_dim_pos[cur_head] = 0; combined_dims[0] = shape[cur_head]; int dim_idx = 0; for (int perm_idx = 1; perm_idx < shape.size(); ++perm_idx) { // combine consecutive dimensions. if (cur_head + 1 == perm[perm_idx]) { cur_head = perm[perm_idx]; combined_dims[dim_idx] *= shape[cur_head]; } else { // Else start a new dimension. cur_head = perm[perm_idx]; dim_idx++; new_dim_pos[cur_head] = dim_idx; combined_dims[dim_idx] = shape[cur_head]; } } new_perm->resize(dim_idx + 1); dim_idx = 0; for (int i = 0; i < new_dim_pos.size(); ++i) { if (new_dim_pos[i] >= 0) { int new_perm_idx = new_dim_pos[i]; (*new_perm)[dim_idx] = new_perm_idx; dim_vec.push_back(combined_dims[new_perm_idx]); dim_idx++; } } *new_dims = framework::make_ddim(dim_vec); } template <typename T> struct TransposeSimple { static bool run(const platform::CUDADeviceContext& ctx, const Tensor& in, const std::vector<int32_t> perm, Tensor* out) { // First reduce the dimensions of the input tensor if possible. std::vector<int> new_perm; framework::DDim new_dims; CombineTransposeDim3(in.dims(), perm, &new_perm, &new_dims); // Only use tile copy GPU kernel when dimension is 2 or 3. 
int dims = new_dims.size(); std::vector<int> new_dim_vec = framework::vectorize<int>(new_dims); if (dims < 2 || dims > 3) return false; auto in_data = in.data<T>(); auto out_data = out->data<T>(); // In most cases, dim will not greater than 3 after combine. switch (dims) { case 2: if (new_perm[0] == 1 && new_perm[1] == 0) { // Add the first dimension size as 1. new_dim_vec.insert(new_dim_vec.begin(), 1); SwapDim1And2InTranspose<T>()(ctx, in_data, new_dim_vec, out_data); return true; } break; case 3: // In this case, suppose we can do coalescing read and write in tile. if (new_perm == std::vector<int>({0, 2, 1})) { SwapDim1And2InTranspose<T>()(ctx, in_data, new_dim_vec, out_data); return true; } else if (new_perm == std::vector<int>({2, 1, 0})) { // Maybe can optimize later, find a way to do coalescing memory copy. // But I think it depends on the data size. If span is not large, // maybe // can do coalescing. SwapDim0And2InTranspose<T>()(ctx, in_data, new_dim_vec, out_data); return true; } else { return false; } break; default: return false; } return false; } }; template <typename DeviceContext, typename T> class TransposeGPUKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.InputVar("X"); auto* out = context.OutputVar("Out"); const framework::Tensor* x_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*x); framework::Tensor* out_tensor = GetMutableLoDTensorOrSelectedRowsValueFromVar(out); out_tensor->mutable_data<T>(context.GetPlace()); if (out_tensor->numel() == 0) { return; } std::vector<int> axis = context.Attr<std::vector<int>>("axis"); int ndims = axis.size(); const auto& dev_ctx = context.template device_context<DeviceContext>(); auto ret = TransposeSimple<T>::run(dev_ctx, *x_tensor, axis, out_tensor); if (!ret) { TransCompute<DeviceContext, T>(ndims, dev_ctx, *x_tensor, out_tensor, axis); } } }; template <typename DeviceContext, typename T> class TransposeGradGPUKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* out_grad = context.InputVar(framework::GradVarName("Out")); auto* x_grad = context.OutputVar(framework::GradVarName("X")); if (!x_grad) { return; } const framework::Tensor* out_grad_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*out_grad); framework::Tensor* x_grad_tensor = GetMutableLoDTensorOrSelectedRowsValueFromVar(x_grad); x_grad_tensor->mutable_data<T>(context.GetPlace()); if (x_grad_tensor->numel() == 0) { return; } std::vector<int> axis = context.Attr<std::vector<int>>("axis"); std::vector<int> reversed_axis(axis); for (size_t i = 0; i < axis.size(); i++) { reversed_axis[axis[i]] = i; } int ndims = axis.size(); const auto& dev_ctx = context.template device_context<DeviceContext>(); auto ret = TransposeSimple<T>::run(dev_ctx, *out_grad_tensor, reversed_axis, x_grad_tensor); if (!ret) { TransCompute<DeviceContext, T>(ndims, dev_ctx, *out_grad_tensor, x_grad_tensor, reversed_axis); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( transpose, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, float>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( transpose_grad, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>, 
ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( transpose2, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int32_t>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, float>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( transpose2_grad, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int32_t>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, double>, ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, plat::float16>);
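TilingSwapDim1And2 in the pair above stages each tile in shared memory declared as T[TileX][TileY + 1]; the extra column keeps the transposed, column-wise reads from all landing in the same shared-memory bank. Below is a minimal standalone transpose kernel showing just that trick; the kernel name and the fixed 32x32 tile are my own assumptions, not code from the operator.

#include <cuda_runtime.h>

constexpr int kTile = 32;

__global__ void transpose32(const float* __restrict__ in, float* __restrict__ out,
                            int rows, int cols) {
    // +1 padding column breaks the bank conflict on the transposed read below.
    __shared__ float tile[kTile][kTile + 1];

    int x = blockIdx.x * kTile + threadIdx.x;   // column in the input
    int y = blockIdx.y * kTile + threadIdx.y;   // row in the input
    if (x < cols && y < rows)
        tile[threadIdx.y][threadIdx.x] = in[y * cols + x];

    __syncthreads();

    // Write the transposed tile; swapping the block indices keeps the output store coalesced.
    int tx = blockIdx.y * kTile + threadIdx.x;  // column in the output (an input row index)
    int ty = blockIdx.x * kTile + threadIdx.y;  // row in the output (an input column index)
    if (tx < rows && ty < cols)
        out[ty * rows + tx] = tile[threadIdx.x][threadIdx.y];
}

// Launch sketch:
//   dim3 block(kTile, kTile);
//   dim3 grid((cols + kTile - 1) / kTile, (rows + kTile - 1) / kTile);
//   transpose32<<<grid, block>>>(d_in, d_out, rows, cols);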
dc94939a698340f991f0d1ead2a4a15261b42a68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "glog/logging.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/errors.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/autogen/memory_efficient_attention.h" #include "paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm_kernel_utils.h" #include "paddle/phi/kernels/fusion/cutlass/memory_efficient_attention_utils.h" namespace phi { namespace fusion { namespace cutlass_internal { using gemm_kernel_utils::getMaximumSharedMemoryPerBlockKb; template <typename T, typename Context> void MemoryEfficientAttentionForwardKernel( const Context& ctx, const DenseTensor& query, const DenseTensor& key, const DenseTensor& value, const paddle::optional<DenseTensor>& bias, const paddle::optional<DenseTensor>& cu_seqlens_q, const paddle::optional<DenseTensor>& cu_seqlens_k, const paddle::optional<DenseTensor>& causal_diagonal, const paddle::optional<DenseTensor>& seqlen_k, const Scalar& max_seqlen_q, const Scalar& max_seqlen_k, const bool causal, const double dropout_p, const float scale, const bool is_test, DenseTensor* output, DenseTensor* logsumexp, DenseTensor* seed_and_offset) { int compute_capacity = ctx.GetComputeCapability(); const auto max_shmem = getMaximumSharedMemoryPerBlockKb(compute_capacity) * 1024; bool kernel_launched = false; auto max_seqlen_q_num = max_seqlen_q.to<uint64_t>(); auto max_seqlen_k_num = max_seqlen_k.to<uint64_t>(); auto launchKernel = [&](auto k_, auto kernel_fn) { using KernelType = decltype(k_); bool is_launched = kernel_launched; if (is_launched) { return; } using scalar_t = typename KernelType::scalar_t; bool use_dropout = (dropout_p != 0); if (!KernelType::kSupportsDropout && use_dropout) { VLOG(3) << "run in to use dropout" << use_dropout; return; } if (!KernelType::kSupportsBias && bias) { VLOG(3) << "run in to bias"; return; } const auto& v_dims = value.dims(); if (KernelType::kSingleValueIteration && KernelType::kKeysPerBlock < v_dims[3]) { VLOG(3) << "run in to value dim" << v_dims; return; } const auto& k_dims = key.dims(); const auto& q_dims = query.dims(); int64_t max_seqlen_q_tmp, max_seqlen_k_tmp; if (cu_seqlens_q) { max_seqlen_q_tmp = max_seqlen_q_num; max_seqlen_k_tmp = 0; // Will be set inside the kernel } else { max_seqlen_q_tmp = q_dims[1]; max_seqlen_k_tmp = k_dims[1]; } VLOG(3) << "max_seqlen_q_tmp " << max_seqlen_q_tmp; if ((q_dims[3] % KernelType::kAlignmentQ) || (k_dims[3] % KernelType::kAlignmentK) || (v_dims[3] % KernelType::kAlignmentV)) { VLOG(3) << "run in to query dim" << q_dims; VLOG(3) << "run in to key dim" << k_dims; return; } size_t smem_bytes = sizeof(typename KernelType::SharedStorage); if (smem_bytes > max_shmem) { VLOG(3) << "run in to shmem" << smem_bytes << " " << max_shmem; return; } kernel_launched = true; VLOG(3) << "launching"; 
output->Resize({q_dims[0], q_dims[1], q_dims[2], v_dims[3]}); constexpr int64_t kAlignLSE = KernelType::kAlignLSE; phi::Dim<3> logsumexp_dims; logsumexp_dims[0] = cu_seqlens_q ? cu_seqlens_q.get().dims()[0] - 1 : q_dims[0]; logsumexp_dims[1] = q_dims[2]; logsumexp_dims[2] = is_test ? 0 : (max_seqlen_q_tmp + kAlignLSE - 1) / kAlignLSE; logsumexp_dims[2] *= kAlignLSE; logsumexp->Resize(logsumexp_dims); ctx.template Alloc<float>(logsumexp); VLOG(3) << "logsumexp dims" << logsumexp_dims; VLOG(3) << "logsumexp" << logsumexp; VLOG(3) << "kAlignLSE" << kAlignLSE; typename KernelType::Params p; p.query_ptr = phi::SafeGetTensorPtr<scalar_t>(query); p.key_ptr = phi::SafeGetTensorPtr<scalar_t>(key); p.value_ptr = phi::SafeGetTensorPtr<scalar_t>(value); p.logsumexp_ptr = is_test ? nullptr : logsumexp->data<float>(); VLOG(3) << "logsumexp_ptr" << p.logsumexp_ptr; DenseTensor out_accum; if (KernelType::kNeedsOutputAccumulatorBuffer) { out_accum.Resize(output->dims()); p.output_accum_ptr = phi::SafeAllocTensor<typename KernelType::output_accum_t, Context>( ctx, &out_accum); VLOG(3) << "output_accum_ptr " << p.output_accum_ptr; } else { p.output_accum_ptr = nullptr; } p.output_ptr = phi::SafeAllocTensor<typename KernelType::output_t, Context>( ctx, output); VLOG(3) << "output_ptr " << p.output_ptr; if (cu_seqlens_q) { p.seqstart_q_ptr = phi::SafeGetTensorPtr<int32_t>(cu_seqlens_q); p.seqstart_k_ptr = phi::SafeGetTensorPtr<int32_t>(cu_seqlens_k); VLOG(3) << "seqstart_q_ptr " << p.seqstart_q_ptr; } else { p.seqstart_q_ptr = nullptr; p.seqstart_k_ptr = nullptr; } PD_MEA_CHECK_OVERFLOW(p.num_heads, q_dims[2]); PD_MEA_CHECK_OVERFLOW(p.head_dim, q_dims[3]); PD_MEA_CHECK_OVERFLOW(p.head_dim_value, v_dims[3]); PD_MEA_CHECK_OVERFLOW(p.num_queries, max_seqlen_q_tmp); PD_MEA_CHECK_OVERFLOW(p.num_keys, max_seqlen_k_tmp); PD_MEA_CHECK_OVERFLOW( p.num_batches, cu_seqlens_q ? 
cu_seqlens_q.get().dims()[0] - 1 : q_dims[0]); p.causal = causal; if (causal_diagonal) { p.causal_diagonal_ptr = phi::SafeGetTensorPtr<int32_t>(causal_diagonal); } else { p.causal_diagonal_ptr = nullptr; } VLOG(3) << "causal_diagonal_ptr " << p.causal_diagonal_ptr; p.seqlen_k_ptr = nullptr; if (seqlen_k) { p.seqlen_k_ptr = phi::SafeGetTensorPtr<int32_t>(seqlen_k); } else { p.seqlen_k_ptr = nullptr; } VLOG(3) << "seqlen_k_ptr " << p.seqlen_k_ptr; if (scale < 0) { p.scale = static_cast<float>(1.0 / std::sqrt(p.head_dim)); } else { p.scale = scale; } VLOG(3) << "scale " << p.scale; PD_MEA_CHECK_OVERFLOW(p.q_strideB, DimStride(query.dims(), 0)); PD_MEA_CHECK_OVERFLOW(p.k_strideB, DimStride(key.dims(), 0)); PD_MEA_CHECK_OVERFLOW(p.v_strideB, DimStride(value.dims(), 0)); PD_MEA_CHECK_OVERFLOW(p.q_strideM, DimStride(query.dims(), 1)); PD_MEA_CHECK_OVERFLOW(p.k_strideM, DimStride(key.dims(), 1)); PD_MEA_CHECK_OVERFLOW(p.v_strideM, DimStride(value.dims(), 1)); PD_MEA_CHECK_OVERFLOW(p.q_strideH, DimStride(query.dims(), 2)); PD_MEA_CHECK_OVERFLOW(p.k_strideH, DimStride(key.dims(), 2)); PD_MEA_CHECK_OVERFLOW(p.v_strideH, DimStride(value.dims(), 2)); PD_MEA_CHECK_OVERFLOW(p.o_strideM, DimStride(output->dims(), 1)); if (bias) { p.attn_bias_ptr = phi::SafeGetTensorPtr<scalar_t>(bias); PD_MEA_CHECK_OVERFLOW( p.bias_strideB, GetMemoryEfficientBiasStrideB(bias.get().dims(), q_dims, k_dims)); PD_MEA_CHECK_OVERFLOW(p.bias_strideH, q_dims[1] * k_dims[1]); PD_MEA_CHECK_OVERFLOW(p.bias_strideM, k_dims[1]); } else { p.attn_bias_ptr = nullptr; } VLOG(3) << "attn_bias_ptr " << p.attn_bias_ptr; VLOG(3) << "bias_strideB " << p.bias_strideB; VLOG(3) << "bias_strideH " << p.bias_strideH; VLOG(3) << "bias_strideM " << p.bias_strideM; phi::Dim<1> seed_dims; seed_dims[0] = 2; seed_and_offset->Resize(seed_dims); ctx.template HostAlloc<int64_t>(seed_and_offset); int64_t* seed_and_offset_ptr = phi::SafeGetTensorPtr<int64_t>(seed_and_offset); auto gen = ctx.GetGenerator(); uint64_t inc = query.dims()[0] * query.dims()[2] * 32; auto seed_offset_pair = gen->IncrementOffset(inc); auto seed = (seed_offset_pair.first); auto offset = (seed_offset_pair.second); seed_and_offset_ptr[0] = (int64_t)seed; seed_and_offset_ptr[1] = (int64_t)offset; VLOG(3) << "seed and offset: " << seed << " " << offset << " " << seed_and_offset_ptr; p.use_dropout = use_dropout; if (use_dropout) { p.seed = seed; p.offset = offset; p.dropout_prob = dropout_p; } else { p.dropout_prob = 0.0; } if (smem_bytes > 0xc000) { const void* kernel_fn_void_ptr = reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(kernel_fn)); PADDLE_ENFORCE_GPU_SUCCESS( hipFuncSetAttribute(kernel_fn_void_ptr, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes)); } KernelType::check_supported(p); VLOG(3) << "Kernel launched with func : " << typeid(kernel_fn).name() << " block dim " << p.getBlocksGrid() << " thread dim " << p.getThreadsGrid(); hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, ctx.stream(), p); }; dispatch_cutlass_forward<T>(ctx, launchKernel); PADDLE_ENFORCE_EQ( kernel_launched, true, phi::errors::InvalidArgument("the kernel should not be launched")); } } // namespace cutlass_internal } // namespace fusion } // namespace phi PD_REGISTER_KERNEL( memory_efficient_attention, GPU, ALL_LAYOUT, phi::fusion::cutlass_internal::MemoryEfficientAttentionForwardKernel, float, phi::dtype::bfloat16, phi::dtype::float16) {}
dc94939a698340f991f0d1ead2a4a15261b42a68.cu
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "glog/logging.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/errors.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/autogen/memory_efficient_attention.h" #include "paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm_kernel_utils.h" #include "paddle/phi/kernels/fusion/cutlass/memory_efficient_attention_utils.h" namespace phi { namespace fusion { namespace cutlass_internal { using gemm_kernel_utils::getMaximumSharedMemoryPerBlockKb; template <typename T, typename Context> void MemoryEfficientAttentionForwardKernel( const Context& ctx, const DenseTensor& query, const DenseTensor& key, const DenseTensor& value, const paddle::optional<DenseTensor>& bias, const paddle::optional<DenseTensor>& cu_seqlens_q, const paddle::optional<DenseTensor>& cu_seqlens_k, const paddle::optional<DenseTensor>& causal_diagonal, const paddle::optional<DenseTensor>& seqlen_k, const Scalar& max_seqlen_q, const Scalar& max_seqlen_k, const bool causal, const double dropout_p, const float scale, const bool is_test, DenseTensor* output, DenseTensor* logsumexp, DenseTensor* seed_and_offset) { int compute_capacity = ctx.GetComputeCapability(); const auto max_shmem = getMaximumSharedMemoryPerBlockKb(compute_capacity) * 1024; bool kernel_launched = false; auto max_seqlen_q_num = max_seqlen_q.to<uint64_t>(); auto max_seqlen_k_num = max_seqlen_k.to<uint64_t>(); auto launchKernel = [&](auto k_, auto kernel_fn) { using KernelType = decltype(k_); bool is_launched = kernel_launched; if (is_launched) { return; } using scalar_t = typename KernelType::scalar_t; bool use_dropout = (dropout_p != 0); if (!KernelType::kSupportsDropout && use_dropout) { VLOG(3) << "run in to use dropout" << use_dropout; return; } if (!KernelType::kSupportsBias && bias) { VLOG(3) << "run in to bias"; return; } const auto& v_dims = value.dims(); if (KernelType::kSingleValueIteration && KernelType::kKeysPerBlock < v_dims[3]) { VLOG(3) << "run in to value dim" << v_dims; return; } const auto& k_dims = key.dims(); const auto& q_dims = query.dims(); int64_t max_seqlen_q_tmp, max_seqlen_k_tmp; if (cu_seqlens_q) { max_seqlen_q_tmp = max_seqlen_q_num; max_seqlen_k_tmp = 0; // Will be set inside the kernel } else { max_seqlen_q_tmp = q_dims[1]; max_seqlen_k_tmp = k_dims[1]; } VLOG(3) << "max_seqlen_q_tmp " << max_seqlen_q_tmp; if ((q_dims[3] % KernelType::kAlignmentQ) || (k_dims[3] % KernelType::kAlignmentK) || (v_dims[3] % KernelType::kAlignmentV)) { VLOG(3) << "run in to query dim" << q_dims; VLOG(3) << "run in to key dim" << k_dims; return; } size_t smem_bytes = sizeof(typename KernelType::SharedStorage); if (smem_bytes > max_shmem) { VLOG(3) << "run in to shmem" << smem_bytes << " " << max_shmem; return; } kernel_launched = true; VLOG(3) << "launching"; output->Resize({q_dims[0], q_dims[1], q_dims[2], v_dims[3]}); constexpr int64_t 
kAlignLSE = KernelType::kAlignLSE; phi::Dim<3> logsumexp_dims; logsumexp_dims[0] = cu_seqlens_q ? cu_seqlens_q.get().dims()[0] - 1 : q_dims[0]; logsumexp_dims[1] = q_dims[2]; logsumexp_dims[2] = is_test ? 0 : (max_seqlen_q_tmp + kAlignLSE - 1) / kAlignLSE; logsumexp_dims[2] *= kAlignLSE; logsumexp->Resize(logsumexp_dims); ctx.template Alloc<float>(logsumexp); VLOG(3) << "logsumexp dims" << logsumexp_dims; VLOG(3) << "logsumexp" << logsumexp; VLOG(3) << "kAlignLSE" << kAlignLSE; typename KernelType::Params p; p.query_ptr = phi::SafeGetTensorPtr<scalar_t>(query); p.key_ptr = phi::SafeGetTensorPtr<scalar_t>(key); p.value_ptr = phi::SafeGetTensorPtr<scalar_t>(value); p.logsumexp_ptr = is_test ? nullptr : logsumexp->data<float>(); VLOG(3) << "logsumexp_ptr" << p.logsumexp_ptr; DenseTensor out_accum; if (KernelType::kNeedsOutputAccumulatorBuffer) { out_accum.Resize(output->dims()); p.output_accum_ptr = phi::SafeAllocTensor<typename KernelType::output_accum_t, Context>( ctx, &out_accum); VLOG(3) << "output_accum_ptr " << p.output_accum_ptr; } else { p.output_accum_ptr = nullptr; } p.output_ptr = phi::SafeAllocTensor<typename KernelType::output_t, Context>( ctx, output); VLOG(3) << "output_ptr " << p.output_ptr; if (cu_seqlens_q) { p.seqstart_q_ptr = phi::SafeGetTensorPtr<int32_t>(cu_seqlens_q); p.seqstart_k_ptr = phi::SafeGetTensorPtr<int32_t>(cu_seqlens_k); VLOG(3) << "seqstart_q_ptr " << p.seqstart_q_ptr; } else { p.seqstart_q_ptr = nullptr; p.seqstart_k_ptr = nullptr; } PD_MEA_CHECK_OVERFLOW(p.num_heads, q_dims[2]); PD_MEA_CHECK_OVERFLOW(p.head_dim, q_dims[3]); PD_MEA_CHECK_OVERFLOW(p.head_dim_value, v_dims[3]); PD_MEA_CHECK_OVERFLOW(p.num_queries, max_seqlen_q_tmp); PD_MEA_CHECK_OVERFLOW(p.num_keys, max_seqlen_k_tmp); PD_MEA_CHECK_OVERFLOW( p.num_batches, cu_seqlens_q ? 
cu_seqlens_q.get().dims()[0] - 1 : q_dims[0]); p.causal = causal; if (causal_diagonal) { p.causal_diagonal_ptr = phi::SafeGetTensorPtr<int32_t>(causal_diagonal); } else { p.causal_diagonal_ptr = nullptr; } VLOG(3) << "causal_diagonal_ptr " << p.causal_diagonal_ptr; p.seqlen_k_ptr = nullptr; if (seqlen_k) { p.seqlen_k_ptr = phi::SafeGetTensorPtr<int32_t>(seqlen_k); } else { p.seqlen_k_ptr = nullptr; } VLOG(3) << "seqlen_k_ptr " << p.seqlen_k_ptr; if (scale < 0) { p.scale = static_cast<float>(1.0 / std::sqrt(p.head_dim)); } else { p.scale = scale; } VLOG(3) << "scale " << p.scale; PD_MEA_CHECK_OVERFLOW(p.q_strideB, DimStride(query.dims(), 0)); PD_MEA_CHECK_OVERFLOW(p.k_strideB, DimStride(key.dims(), 0)); PD_MEA_CHECK_OVERFLOW(p.v_strideB, DimStride(value.dims(), 0)); PD_MEA_CHECK_OVERFLOW(p.q_strideM, DimStride(query.dims(), 1)); PD_MEA_CHECK_OVERFLOW(p.k_strideM, DimStride(key.dims(), 1)); PD_MEA_CHECK_OVERFLOW(p.v_strideM, DimStride(value.dims(), 1)); PD_MEA_CHECK_OVERFLOW(p.q_strideH, DimStride(query.dims(), 2)); PD_MEA_CHECK_OVERFLOW(p.k_strideH, DimStride(key.dims(), 2)); PD_MEA_CHECK_OVERFLOW(p.v_strideH, DimStride(value.dims(), 2)); PD_MEA_CHECK_OVERFLOW(p.o_strideM, DimStride(output->dims(), 1)); if (bias) { p.attn_bias_ptr = phi::SafeGetTensorPtr<scalar_t>(bias); PD_MEA_CHECK_OVERFLOW( p.bias_strideB, GetMemoryEfficientBiasStrideB(bias.get().dims(), q_dims, k_dims)); PD_MEA_CHECK_OVERFLOW(p.bias_strideH, q_dims[1] * k_dims[1]); PD_MEA_CHECK_OVERFLOW(p.bias_strideM, k_dims[1]); } else { p.attn_bias_ptr = nullptr; } VLOG(3) << "attn_bias_ptr " << p.attn_bias_ptr; VLOG(3) << "bias_strideB " << p.bias_strideB; VLOG(3) << "bias_strideH " << p.bias_strideH; VLOG(3) << "bias_strideM " << p.bias_strideM; phi::Dim<1> seed_dims; seed_dims[0] = 2; seed_and_offset->Resize(seed_dims); ctx.template HostAlloc<int64_t>(seed_and_offset); int64_t* seed_and_offset_ptr = phi::SafeGetTensorPtr<int64_t>(seed_and_offset); auto gen = ctx.GetGenerator(); uint64_t inc = query.dims()[0] * query.dims()[2] * 32; auto seed_offset_pair = gen->IncrementOffset(inc); auto seed = (seed_offset_pair.first); auto offset = (seed_offset_pair.second); seed_and_offset_ptr[0] = (int64_t)seed; seed_and_offset_ptr[1] = (int64_t)offset; VLOG(3) << "seed and offset: " << seed << " " << offset << " " << seed_and_offset_ptr; p.use_dropout = use_dropout; if (use_dropout) { p.seed = seed; p.offset = offset; p.dropout_prob = dropout_p; } else { p.dropout_prob = 0.0; } if (smem_bytes > 0xc000) { const void* kernel_fn_void_ptr = reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(kernel_fn)); PADDLE_ENFORCE_GPU_SUCCESS( cudaFuncSetAttribute(kernel_fn_void_ptr, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes)); } KernelType::check_supported(p); VLOG(3) << "Kernel launched with func : " << typeid(kernel_fn).name() << " block dim " << p.getBlocksGrid() << " thread dim " << p.getThreadsGrid(); kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, ctx.stream()>>>(p); }; dispatch_cutlass_forward<T>(ctx, launchKernel); PADDLE_ENFORCE_EQ( kernel_launched, true, phi::errors::InvalidArgument("the kernel should not be launched")); } } // namespace cutlass_internal } // namespace fusion } // namespace phi PD_REGISTER_KERNEL( memory_efficient_attention, GPU, ALL_LAYOUT, phi::fusion::cutlass_internal::MemoryEfficientAttentionForwardKernel, float, phi::dtype::bfloat16, phi::dtype::float16) {}
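The pair above opts its attention kernel into more than 48 KiB of dynamic shared memory whenever smem_bytes exceeds 0xc000, using cudaFuncSetAttribute (hipFuncSetAttribute on the HIP side). The standalone sketch below shows just that opt-in on a hypothetical kernel, fill_shared; it assumes a device that supports the larger shared-memory carve-out (compute capability 7.0 or newer) and is not part of either file above.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel using dynamic shared memory sized at launch time.
__global__ void fill_shared(float* out, int n) {
  extern __shared__ float buf[];
  int i = threadIdx.x;
  if (i < n) buf[i] = static_cast<float>(i);
  __syncthreads();
  if (i < n) out[i] = buf[i];
}

int main() {
  const int n = 256;
  const size_t smem_bytes = 64 * 1024;  // 64 KiB, above the default 48 KiB per-block limit
  // Without this opt-in the launch below is rejected by the runtime.
  cudaFuncSetAttribute(fill_shared, cudaFuncAttributeMaxDynamicSharedMemorySize,
                       static_cast<int>(smem_bytes));
  float* d_out = nullptr;
  cudaMalloc(&d_out, n * sizeof(float));
  fill_shared<<<1, n, smem_bytes>>>(d_out, n);
  cudaDeviceSynchronize();
  printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d_out);
  return 0;
}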
76652973d48ffd73ef9dd6d60bad68715a875f6a.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <iostream> #include <ctime> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // Stops underlining of __global__ #include <device_launch_parameters.h> // Stops underlining of threadIdx etc. using namespace std; __global__ void FindClosestGPU(float3* points, int* indices, int count) { if(count <= 1) return; int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < count) { float3 thisPoint = points[idx]; float smallestSoFar = 3.40282e38f; for(int i = 0; i < count; i++) { if(i == idx) continue; float dist = (thisPoint.x - points[i].x)*(thisPoint.x - points[i].x); dist += (thisPoint.y - points[i].y)*(thisPoint.y - points[i].y); dist += (thisPoint.z - points[i].z)*(thisPoint.z - points[i].z); if(dist < smallestSoFar) { smallestSoFar = dist; indices[idx] = i; } } } } int main(int argc, char **argv) { cout<<"running GPU implementation"<<endl; // Number of points const int count = 10000; // Arrays of points int *indexOfClosest = new int[count]; float3 *points = new float3[count]; float3* d_points; // GPU version int* d_indexOfClosest; // Create a list of random points for(int i = 0; i < count; i++) { points[i].x = (float)((rand()%10000) - 5000); points[i].y = (float)((rand()%10000) - 5000); points[i].z = (float)((rand()%10000) - 5000); } hipMalloc(&d_points, sizeof(float3) * count); hipMemcpy(d_points, points, sizeof(float3) * count, hipMemcpyHostToDevice); hipMalloc(&d_indexOfClosest, sizeof(int) * count); // This variable is used to keep track of the fastest time so far long fastest = 1000000; // Run the algorithm 20 times for(int q = 0; q < 20; q++) { long startTime = clock(); // Run the algorithm //FindClosestCPU(points, indexOfClosest, count); hipLaunchKernelGGL(( FindClosestGPU), dim3((count / 320)+1), dim3(320), 0, 0, d_points, d_indexOfClosest, count); hipMemcpy(indexOfClosest, d_indexOfClosest, sizeof(int) * count, hipMemcpyDeviceToHost); long finishTime = clock(); cout<<q<<" "<<(finishTime - startTime)<<endl; // If that run was faster update the fastest time so far if((finishTime - startTime) < fastest) fastest = (finishTime - startTime); } // Print out the fastest time cout<<"Fastest time: "<<fastest<<endl; // Print the final results to screen cout<<"Final results:"<<endl; for(int i = 0; i < 10; i++) cout<<i<<"."<<indexOfClosest[i]<<endl; // Deallocate ram delete[] indexOfClosest; delete[] points; hipFree(d_points); hipFree(d_indexOfClosest); hipDeviceReset(); return 0; }
76652973d48ffd73ef9dd6d60bad68715a875f6a.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <iostream> #include <ctime> #include <cuda.h> #include <cuda_runtime.h> // Stops underlining of __global__ #include <device_launch_parameters.h> // Stops underlining of threadIdx etc. using namespace std; __global__ void FindClosestGPU(float3* points, int* indices, int count) { if(count <= 1) return; int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < count) { float3 thisPoint = points[idx]; float smallestSoFar = 3.40282e38f; for(int i = 0; i < count; i++) { if(i == idx) continue; float dist = (thisPoint.x - points[i].x)*(thisPoint.x - points[i].x); dist += (thisPoint.y - points[i].y)*(thisPoint.y - points[i].y); dist += (thisPoint.z - points[i].z)*(thisPoint.z - points[i].z); if(dist < smallestSoFar) { smallestSoFar = dist; indices[idx] = i; } } } } int main(int argc, char **argv) { cout<<"running GPU implementation"<<endl; // Number of points const int count = 10000; // Arrays of points int *indexOfClosest = new int[count]; float3 *points = new float3[count]; float3* d_points; // GPU version int* d_indexOfClosest; // Create a list of random points for(int i = 0; i < count; i++) { points[i].x = (float)((rand()%10000) - 5000); points[i].y = (float)((rand()%10000) - 5000); points[i].z = (float)((rand()%10000) - 5000); } cudaMalloc(&d_points, sizeof(float3) * count); cudaMemcpy(d_points, points, sizeof(float3) * count, cudaMemcpyHostToDevice); cudaMalloc(&d_indexOfClosest, sizeof(int) * count); // This variable is used to keep track of the fastest time so far long fastest = 1000000; // Run the algorithm 20 times for(int q = 0; q < 20; q++) { long startTime = clock(); // Run the algorithm //FindClosestCPU(points, indexOfClosest, count); FindClosestGPU<<<(count / 320)+1, 320>>>(d_points, d_indexOfClosest, count); cudaMemcpy(indexOfClosest, d_indexOfClosest, sizeof(int) * count, cudaMemcpyDeviceToHost); long finishTime = clock(); cout<<q<<" "<<(finishTime - startTime)<<endl; // If that run was faster update the fastest time so far if((finishTime - startTime) < fastest) fastest = (finishTime - startTime); } // Print out the fastest time cout<<"Fastest time: "<<fastest<<endl; // Print the final results to screen cout<<"Final results:"<<endl; for(int i = 0; i < 10; i++) cout<<i<<"."<<indexOfClosest[i]<<endl; // Deallocate ram delete[] indexOfClosest; delete[] points; cudaFree(d_points); cudaFree(d_indexOfClosest); cudaDeviceReset(); return 0; }
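Both versions of the benchmark above time each iteration with clock(), which counts host CPU ticks and folds the device-to-host memcpy into the measurement. A common alternative is to bracket just the kernel with CUDA events; the sketch below is a standalone illustration of that pattern (dummy is a placeholder kernel, not FindClosestGPU) and is not a change to the benchmark itself.

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder kernel standing in for FindClosestGPU<<<grid, block>>>(...).
__global__ void dummy() {}

int main() {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  dummy<<<1, 1>>>();
  cudaEventRecord(stop);

  cudaEventSynchronize(stop);             // wait until the stop event has completed
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop); // elapsed GPU time in milliseconds
  printf("kernel time: %.3f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}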
c491babeb0497fd107ad5f0e5c2949c9ef4919c4.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <amgcl/backend/cuda.hpp> #include <amgcl/adapter/crs_tuple.hpp> #include <amgcl/make_solver.hpp> #include <amgcl/amg.hpp> #include <amgcl/coarsening/smoothed_aggregation.hpp> #include <amgcl/relaxation/spai0.hpp> #include <amgcl/solver/cg.hpp> #include <amgcl/profiler.hpp> #include "log_times.hpp" #include "argh.h" namespace amgcl { profiler<backend::cuda_clock> prof; } using amgcl::prof; //--------------------------------------------------------------------------- void assemble( int n, std::vector<int> &ptr, std::vector<int> &col, std::vector<double> &val ) { int n3 = n * n * n; ptr.clear(); ptr.reserve(n3 + 1); col.clear(); col.reserve(n3 * 7); val.clear(); val.reserve(n3 * 7); ptr.push_back(0); for(int k = 0, idx = 0; k < n; ++k) { for(int j = 0; j < n; ++j) { for (int i = 0; i < n; ++i, ++idx) { if (k > 0) { col.push_back(idx - n * n); val.push_back(-1.0/6.0); } if (j > 0) { col.push_back(idx - n); val.push_back(-1.0/6.0); } if (i > 0) { col.push_back(idx - 1); val.push_back(-1.0/6.0); } col.push_back(idx); val.push_back(1.0); if (i + 1 < n) { col.push_back(idx + 1); val.push_back(-1.0/6.0); } if (j + 1 < n) { col.push_back(idx + n); val.push_back(-1.0/6.0); } if (k + 1 < n) { col.push_back(idx + n * n); val.push_back(-1.0/6.0); } ptr.push_back(col.size()); } } } } //--------------------------------------------------------------------------- int main(int argc, char *argv[]) { using namespace amgcl; typedef backend::cuda<double> Backend; typedef make_solver< amg< Backend, coarsening::smoothed_aggregation, relaxation::spai0 >, solver::cg<Backend> > Solver; Backend::params bprm; hipsparseCreate(&bprm.cusparse_handle); Solver::params prm; prm.precond.coarsening.relax = 0.75; int n; argh::parser cmdl(argc, argv); cmdl({"n", "size"}, "150") >> n; int n3 = n * n * n; std::vector<int> ptr, col; std::vector<double> val; prof.tic("assemble"); assemble(n, ptr, col, val); prof.toc("assemble"); thrust::device_vector<double> f(n3, 1.0); thrust::device_vector<double> x(n3, 0.0); prof.tic("setup"); Solver solve(std::tie(n3, ptr, col, val), prm, bprm); double tm_setup = prof.toc("setup"); std::cout << solve << std::endl; int iters; double error; prof.tic("solve"); std::tie(iters, error) = solve(f, x); double tm_solve = prof.toc("solve"); std::cout << "iters: " << iters << std::endl << "error: " << error << std::endl << prof << std::endl; log_times("amgcl-cuda.txt", 1, n, iters, tm_setup, tm_solve); }
c491babeb0497fd107ad5f0e5c2949c9ef4919c4.cu
#include <iostream> #include <vector> #include <amgcl/backend/cuda.hpp> #include <amgcl/adapter/crs_tuple.hpp> #include <amgcl/make_solver.hpp> #include <amgcl/amg.hpp> #include <amgcl/coarsening/smoothed_aggregation.hpp> #include <amgcl/relaxation/spai0.hpp> #include <amgcl/solver/cg.hpp> #include <amgcl/profiler.hpp> #include "log_times.hpp" #include "argh.h" namespace amgcl { profiler<backend::cuda_clock> prof; } using amgcl::prof; //--------------------------------------------------------------------------- void assemble( int n, std::vector<int> &ptr, std::vector<int> &col, std::vector<double> &val ) { int n3 = n * n * n; ptr.clear(); ptr.reserve(n3 + 1); col.clear(); col.reserve(n3 * 7); val.clear(); val.reserve(n3 * 7); ptr.push_back(0); for(int k = 0, idx = 0; k < n; ++k) { for(int j = 0; j < n; ++j) { for (int i = 0; i < n; ++i, ++idx) { if (k > 0) { col.push_back(idx - n * n); val.push_back(-1.0/6.0); } if (j > 0) { col.push_back(idx - n); val.push_back(-1.0/6.0); } if (i > 0) { col.push_back(idx - 1); val.push_back(-1.0/6.0); } col.push_back(idx); val.push_back(1.0); if (i + 1 < n) { col.push_back(idx + 1); val.push_back(-1.0/6.0); } if (j + 1 < n) { col.push_back(idx + n); val.push_back(-1.0/6.0); } if (k + 1 < n) { col.push_back(idx + n * n); val.push_back(-1.0/6.0); } ptr.push_back(col.size()); } } } } //--------------------------------------------------------------------------- int main(int argc, char *argv[]) { using namespace amgcl; typedef backend::cuda<double> Backend; typedef make_solver< amg< Backend, coarsening::smoothed_aggregation, relaxation::spai0 >, solver::cg<Backend> > Solver; Backend::params bprm; cusparseCreate(&bprm.cusparse_handle); Solver::params prm; prm.precond.coarsening.relax = 0.75; int n; argh::parser cmdl(argc, argv); cmdl({"n", "size"}, "150") >> n; int n3 = n * n * n; std::vector<int> ptr, col; std::vector<double> val; prof.tic("assemble"); assemble(n, ptr, col, val); prof.toc("assemble"); thrust::device_vector<double> f(n3, 1.0); thrust::device_vector<double> x(n3, 0.0); prof.tic("setup"); Solver solve(std::tie(n3, ptr, col, val), prm, bprm); double tm_setup = prof.toc("setup"); std::cout << solve << std::endl; int iters; double error; prof.tic("solve"); std::tie(iters, error) = solve(f, x); double tm_solve = prof.toc("solve"); std::cout << "iters: " << iters << std::endl << "error: " << error << std::endl << prof << std::endl; log_times("amgcl-cuda.txt", 1, n, iters, tm_setup, tm_solve); }
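assemble() in the pair above builds a 3D seven-point Poisson stencil in CRS form (ptr/col/val) with diagonal 1 and off-diagonals -1/6, so interior rows sum to zero. The sketch below is a hypothetical 1D analogue (three-point stencil, off-diagonal -1/2) that prints per-row nonzero counts and row sums; it uses only the standard library and none of the amgcl types from the files above.

#include <cstdio>
#include <vector>

int main() {
  const int n = 5;
  std::vector<int> ptr{0}, col;
  std::vector<double> val;
  // 1D three-point stencil assembled row by row, mirroring the CRS layout of assemble().
  for (int i = 0; i < n; ++i) {
    if (i > 0)     { col.push_back(i - 1); val.push_back(-0.5); }
    col.push_back(i); val.push_back(1.0);
    if (i + 1 < n) { col.push_back(i + 1); val.push_back(-0.5); }
    ptr.push_back(static_cast<int>(col.size()));
  }
  // Interior rows sum to 0; the two boundary rows sum to 0.5.
  for (int i = 0; i < n; ++i) {
    double s = 0.0;
    for (int k = ptr[i]; k < ptr[i + 1]; ++k) s += val[k];
    printf("row %d: %d nonzeros, sum %.2f\n", i, ptr[i + 1] - ptr[i], s);
  }
  return 0;
}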
9984b67573e9287c0898e04398de6596192259d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <limits> #include <vector> #include "paddle/fluid/operators/stack_op.h" #include "paddle/fluid/platform/gpu_launch_param_config.h" namespace plat = paddle::platform; namespace ops = paddle::operators; namespace paddle { namespace operators { template <typename T, typename IntType> __global__ void StackCUDAKernel(T** input_ptrs, int split_size, int rows, int cols, T* __restrict__ output) { IntType grid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; grid_x < cols; grid_x += blockDim.x * gridDim.x) { IntType grid_y = blockIdx.y * blockDim.y + threadIdx.y; IntType split = grid_x / split_size; const T* input_ptr = input_ptrs[split]; IntType col_offset = grid_x % split_size; #pragma unroll for (; grid_y < rows; grid_y += blockDim.y * gridDim.y) { output[grid_y * cols + grid_x] = input_ptr[grid_y * split_size + col_offset]; } } } template <typename T> class StackGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto x = ctx.MultiInput<Tensor>("X"); auto* y = ctx.Output<Tensor>("Y"); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += (x[0]->dims().size() + 1); int n = static_cast<int>(x.size()); auto* y_data = y->mutable_data<T>(ctx.GetPlace()); std::vector<const T*> x_datas(n); for (int i = 0; i < n; i++) { x_datas[i] = x[i]->data<T>(); } auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_x_data = memory::Alloc(dev_ctx, x_datas.size() * sizeof(T*)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_x_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(x_datas.data()), x_datas.size() * sizeof(T*), dev_ctx.stream()); // Split x dim from axis to matrix int x_row = 1, x_col = 1; for (int i = 0; i < axis; ++i) { x_row *= x[0]->dims()[i]; } x_col = x[0]->numel() / x_row; int out_col = x_col * n; auto config = GetGpuLaunchConfig2D(dev_ctx, out_col, x_row); if (y->numel() < std::numeric_limits<int32_t>::max()) { hipLaunchKernelGGL(( StackCUDAKernel<T, int32_t>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } else { hipLaunchKernelGGL(( StackCUDAKernel<T, int64_t>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } } }; template <typename T, typename IntType> __global__ void UnStackCUDAKernel(const T* __restrict__ input, int pre_dim_size, int split_dim_size, int suf_dim_size, int num_split, T** output_ptrs) { assert(blockDim.y == 1); assert(blockDim.z == 1); // In this case they are equal assert(split_dim_size % num_split == 0); IntType size = pre_dim_size * split_dim_size * suf_dim_size; IntType 
each_dim_size = split_dim_size / num_split; for (IntType offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size; offset += blockDim.x * gridDim.x) { IntType i = offset / (split_dim_size * suf_dim_size); IntType j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size; IntType k = offset % suf_dim_size; T* output = output_ptrs[j / each_dim_size]; IntType output_ind = i * each_dim_size * suf_dim_size + (j % each_dim_size) * suf_dim_size + k; *(output + output_ind) = input[offset]; } } template <typename T> class StackGradGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y")); auto dx = ctx.MultiOutput<Tensor>(framework::GradVarName("X")); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += dy->dims().size(); int n = dy->dims()[axis]; PADDLE_ENFORCE_EQ(n, dx.size(), platform::errors::InvalidArgument( "Output dx size should be equal to n, but" " received n is:%d dx size is:%d.", n, dx.size())); // dx is output, so save each data address, then copy each dy into dx_data std::vector<T*> outputs(n); auto out_var_names = ctx.OutputNames(framework::GradVarName("X")); for (size_t j = 0; j < dx.size(); ++j) { if (out_var_names[j] != framework::kEmptyVarName && dx[j]->numel() != 0UL) { T* ptr = dx[j]->mutable_data<T>(ctx.GetPlace()); outputs[j] = ptr; } else { outputs[j] = nullptr; } } auto dy_data = dy->data<T>(); // each dx should have same shape int dy_pre = 1, dy_suf = 1; auto dy_dims = dy->dims(); int split_dim = n; for (int i = 0; i < axis; ++i) { dy_pre *= dy_dims[i]; } dy_suf = dy->numel() / (split_dim * dy_pre); auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_out_data = memory::Alloc(dev_ctx, outputs.size() * sizeof(T*)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_out_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(outputs.data()), outputs.size() * sizeof(T*), dev_ctx.stream()); auto config = GetGpuLaunchConfig1D(dev_ctx, dy_pre * split_dim * dy_suf); if (dy->numel() < std::numeric_limits<int32_t>::max()) { hipLaunchKernelGGL(( UnStackCUDAKernel< T, int32_t>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, dev_ctx.stream(), dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } else { hipLaunchKernelGGL(( UnStackCUDAKernel< T, int64_t>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, dev_ctx.stream(), dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(stack, ops::StackGPUKernel<float>, ops::StackGPUKernel<double>, ops::StackGPUKernel<int>, ops::StackGPUKernel<int64_t>, ops::StackGPUKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(stack_grad, ops::StackGradGPUKernel<float>, ops::StackGradGPUKernel<double>, ops::StackGradGPUKernel<int>, ops::StackGradGPUKernel<int64_t>, ops::StackGradGPUKernel<plat::float16>);
9984b67573e9287c0898e04398de6596192259d4.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <limits> #include <vector> #include "paddle/fluid/operators/stack_op.h" #include "paddle/fluid/platform/gpu_launch_param_config.h" namespace plat = paddle::platform; namespace ops = paddle::operators; namespace paddle { namespace operators { template <typename T, typename IntType> __global__ void StackCUDAKernel(T** input_ptrs, int split_size, int rows, int cols, T* __restrict__ output) { IntType grid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; grid_x < cols; grid_x += blockDim.x * gridDim.x) { IntType grid_y = blockIdx.y * blockDim.y + threadIdx.y; IntType split = grid_x / split_size; const T* input_ptr = input_ptrs[split]; IntType col_offset = grid_x % split_size; #pragma unroll for (; grid_y < rows; grid_y += blockDim.y * gridDim.y) { output[grid_y * cols + grid_x] = input_ptr[grid_y * split_size + col_offset]; } } } template <typename T> class StackGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto x = ctx.MultiInput<Tensor>("X"); auto* y = ctx.Output<Tensor>("Y"); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += (x[0]->dims().size() + 1); int n = static_cast<int>(x.size()); auto* y_data = y->mutable_data<T>(ctx.GetPlace()); std::vector<const T*> x_datas(n); for (int i = 0; i < n; i++) { x_datas[i] = x[i]->data<T>(); } auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_x_data = memory::Alloc(dev_ctx, x_datas.size() * sizeof(T*)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_x_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(x_datas.data()), x_datas.size() * sizeof(T*), dev_ctx.stream()); // Split x dim from axis to matrix int x_row = 1, x_col = 1; for (int i = 0; i < axis; ++i) { x_row *= x[0]->dims()[i]; } x_col = x[0]->numel() / x_row; int out_col = x_col * n; auto config = GetGpuLaunchConfig2D(dev_ctx, out_col, x_row); if (y->numel() < std::numeric_limits<int32_t>::max()) { StackCUDAKernel<T, int32_t><<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>( reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } else { StackCUDAKernel<T, int64_t><<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>( reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } } }; template <typename T, typename IntType> __global__ void UnStackCUDAKernel(const T* __restrict__ input, int pre_dim_size, int split_dim_size, int suf_dim_size, int num_split, T** output_ptrs) { assert(blockDim.y == 1); assert(blockDim.z == 1); // In this case they are equal assert(split_dim_size % num_split == 0); IntType size = pre_dim_size * split_dim_size * suf_dim_size; IntType each_dim_size = split_dim_size / num_split; for (IntType offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size; offset += blockDim.x * 
gridDim.x) { IntType i = offset / (split_dim_size * suf_dim_size); IntType j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size; IntType k = offset % suf_dim_size; T* output = output_ptrs[j / each_dim_size]; IntType output_ind = i * each_dim_size * suf_dim_size + (j % each_dim_size) * suf_dim_size + k; *(output + output_ind) = input[offset]; } } template <typename T> class StackGradGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y")); auto dx = ctx.MultiOutput<Tensor>(framework::GradVarName("X")); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += dy->dims().size(); int n = dy->dims()[axis]; PADDLE_ENFORCE_EQ(n, dx.size(), platform::errors::InvalidArgument( "Output dx size should be equal to n, but" " received n is:%d dx size is:%d.", n, dx.size())); // dx is output, so save each data address, then copy each dy into dx_data std::vector<T*> outputs(n); auto out_var_names = ctx.OutputNames(framework::GradVarName("X")); for (size_t j = 0; j < dx.size(); ++j) { if (out_var_names[j] != framework::kEmptyVarName && dx[j]->numel() != 0UL) { T* ptr = dx[j]->mutable_data<T>(ctx.GetPlace()); outputs[j] = ptr; } else { outputs[j] = nullptr; } } auto dy_data = dy->data<T>(); // each dx should have same shape int dy_pre = 1, dy_suf = 1; auto dy_dims = dy->dims(); int split_dim = n; for (int i = 0; i < axis; ++i) { dy_pre *= dy_dims[i]; } dy_suf = dy->numel() / (split_dim * dy_pre); auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_out_data = memory::Alloc(dev_ctx, outputs.size() * sizeof(T*)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_out_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(outputs.data()), outputs.size() * sizeof(T*), dev_ctx.stream()); auto config = GetGpuLaunchConfig1D(dev_ctx, dy_pre * split_dim * dy_suf); if (dy->numel() < std::numeric_limits<int32_t>::max()) { UnStackCUDAKernel< T, int32_t><<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>( dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } else { UnStackCUDAKernel< T, int64_t><<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>( dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(stack, ops::StackGPUKernel<float>, ops::StackGPUKernel<double>, ops::StackGPUKernel<int>, ops::StackGPUKernel<int64_t>, ops::StackGPUKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(stack_grad, ops::StackGradGPUKernel<float>, ops::StackGradGPUKernel<double>, ops::StackGradGPUKernel<int>, ops::StackGradGPUKernel<int64_t>, ops::StackGradGPUKernel<plat::float16>);
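StackCUDAKernel and UnStackCUDAKernel above both receive a device array of device pointers (T**), which the host code builds with memory::Alloc plus memory::Copy. The standalone sketch below shows that same pattern with plain CUDA runtime calls and a hypothetical gather_first kernel; it illustrates the technique only and is not Paddle code.

#include <cstdio>
#include <cuda_runtime.h>

// Reads the first element of each of n device buffers through a device-side pointer array.
__global__ void gather_first(float** inputs, int n, float* out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = inputs[i][0];
}

int main() {
  const int n = 3;
  float h_vals[n] = {1.0f, 2.0f, 3.0f};
  float* h_ptrs[n];
  for (int i = 0; i < n; ++i) {
    cudaMalloc(&h_ptrs[i], sizeof(float));
    cudaMemcpy(h_ptrs[i], &h_vals[i], sizeof(float), cudaMemcpyHostToDevice);
  }
  // Copy the host-side array of device pointers to the device, as the stack op does.
  float** d_ptrs = nullptr;
  cudaMalloc(&d_ptrs, n * sizeof(float*));
  cudaMemcpy(d_ptrs, h_ptrs, n * sizeof(float*), cudaMemcpyHostToDevice);
  float* d_out = nullptr;
  cudaMalloc(&d_out, n * sizeof(float));
  gather_first<<<1, 32>>>(d_ptrs, n, d_out);
  float h_out[n];
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("%.0f %.0f %.0f\n", h_out[0], h_out[1], h_out[2]);
  for (int i = 0; i < n; ++i) cudaFree(h_ptrs[i]);
  cudaFree(d_ptrs);
  cudaFree(d_out);
  return 0;
}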
4f4d6cd133101fee7de127e27663f33c7ca04a7c.hip
// !!! This is a file automatically generated by hipify!!! /* * ===================================================================================== * * Filename: lud.cu * * Description: The main wrapper for the suite * * Version: 1.0 * Created: 10/22/2009 08:40:34 PM * Revision: none * Compiler: gcc * * Author: Liang Wang (lw2aw), [email protected] * Company: CS@UVa * * ===================================================================================== */ // CAROL-RADIATION radiation benchmark implementation - <caio.b.lunardi at gmail.com> - 2018 #include <hip/hip_runtime.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> #include <random> #include <hip/hip_fp16.h> #include "half.hpp" // helper functions #include "helper_string.h" #include "helper_cuda.h" #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #ifdef LOGS #include "log_helper.h" #endif #include "hlud_kernel.hip" #define DEFAULT_INPUT_SIZE 8192 int verbose = 0; int fault_injection = 0; int k=0; // k x k matrix size int matrixSize=0; // = k * k matrix size int iterations=100000000; // global loop iteracion bool generate=false; bool generator_debug=false; #define GENERATOR_MAXABSVALUE 2.0 #define GENERATOR_MINABSVALUE 0 typedef half tested_type; typedef half_float::half tested_type_host; //================== Input paths char *gold_matrix_path, *input_matrix_path; FILE* f_INPUT; FILE* f_B; FILE* f_GOLD; //==================================== //================== Host and device matrix ptr's half_float::half *INPUT; half_float::half *B; half_float::half *GOLD; half *d_INPUT; half *d_OUTPUT; //==================================== void GetDevice(){ //================== Retrieve and set the default CUDA device hipDeviceProp_t prop; hipError_t teste; int count=0; teste = hipGetDeviceCount(&count); printf("\nGet Device Test: %s\n", hipGetErrorString(teste)); for (int i=0; i< count; i++) { hipGetDeviceProperties( &prop, i ); printf( "Name: %s\n", prop.name ); } int *ndevice; int dev = 0; ndevice = &dev; hipGetDevice(ndevice); hipSetDevice(0); hipGetDeviceProperties( &prop, 0 ); printf("\ndevice: %d %s\n", *ndevice, prop.name); } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } void allocCudaMemory() { //================== CUDA error handlers hipError_t malloc; const char *erro; //==================================== malloc = hipMalloc( ( void** ) &d_INPUT, matrixSize * sizeof( half ) ); erro = hipGetErrorString(malloc); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error input"); end_log_file(); #endif exit(EXIT_FAILURE); } //mem allocate failure malloc = hipMalloc( ( void** ) &d_OUTPUT, matrixSize * sizeof( half ) ); erro = hipGetErrorString(malloc); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error output"); end_log_file(); #endif exit(EXIT_FAILURE);} //mem allocate failure } void copyCudaMemory() { //================== CUDA error handlers hipError_t mcpy; const char *erro; //==================================== mcpy = hipMemset(d_OUTPUT, 0, matrixSize * sizeof( half )); erro = hipGetErrorString(mcpy); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error gpu output load memset"); end_log_file(); #endif exit(EXIT_FAILURE);} 
//mem allocate failure mcpy = hipMemcpy( d_INPUT, INPUT, matrixSize * sizeof( half ), hipMemcpyHostToDevice ); // PUSH A erro = hipGetErrorString(mcpy); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error gpu load input"); end_log_file(); #endif exit(EXIT_FAILURE);} //mem allocate failure } void generateInputMatrix() { FILE *f_INPUT; half_float::half *h_INPUT; if (k==DEFAULT_INPUT_SIZE) { h_INPUT = INPUT; } else { h_INPUT = (tested_type_host*) malloc(DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE * sizeof(tested_type)); if (!h_INPUT) { printf("Could not alloc h_INPUT\n"); exit(EXIT_FAILURE); } } std::random_device rd; //Will be used to obtain a seed for the random number engine std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd() std::uniform_real_distribution<double> dis(-GENERATOR_MAXABSVALUE, GENERATOR_MAXABSVALUE); if (!generator_debug) { for (int i=0; i<DEFAULT_INPUT_SIZE; i++) { for (int j=0; j<DEFAULT_INPUT_SIZE; j++) { h_INPUT[i * DEFAULT_INPUT_SIZE + j] = (tested_type_host)dis(gen); } } } else { for (int i=0; i<DEFAULT_INPUT_SIZE; i++) { for (int j=0; j<DEFAULT_INPUT_SIZE; j++) { h_INPUT[i * DEFAULT_INPUT_SIZE + j] = (tested_type_host)2.0; } } } if (h_INPUT != INPUT) { memcpy(INPUT, h_INPUT, matrixSize * sizeof(tested_type)); } int numZeros; int numNans; int numInfs; // printf("Write\n"); f_INPUT = fopen(input_matrix_path, "wb"); if (!f_INPUT) { printf("Could not open f_INPUT\n"); exit(EXIT_FAILURE); } tested_type_host val; numZeros = 0; numNans = 0; numInfs = 0; for (int i = 0; i<DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE; i++) { val=h_INPUT[i]; if (val == 0) numZeros++; if (isnan(val)) numNans++; if (isinf(val)) numInfs++; } printf("Number of zeros/NaNs/INFs on matrix INPUT: %d/%d/%d\n", numZeros, numNans, numInfs); for(int i=0; i<DEFAULT_INPUT_SIZE; i++) { fwrite(&(h_INPUT[i * DEFAULT_INPUT_SIZE]), sizeof(tested_type) * DEFAULT_INPUT_SIZE, 1, f_INPUT); } printf("Element 32 of matrix A: %f\n", (double)INPUT[32]); printf("Done\n"); fclose(f_INPUT); if (h_INPUT != INPUT) { free(h_INPUT); } return; } void writeGoldToFile(half_float::half *m) { if (!(f_GOLD = fopen(gold_matrix_path, "wb"))) { printf("Error: Could not open gold file in wb mode. %s\n", gold_matrix_path); exit(EXIT_FAILURE); } else { size_t ret_value = 0; for (int i = 0; i < k; i++) { ret_value = fwrite(&(m[i * k]), k * sizeof( half ), 1, f_GOLD); if (ret_value != 1) { printf("Failure writing to gold: %d\n", ret_value); exit(EXIT_FAILURE); } } fclose(f_GOLD); } } void ReadMatrixFromFile(){ //================== Read inputs to HOST memory int i; if (verbose) printf("Reading matrices... 
"); double time = mysecond(); f_INPUT = fopen(input_matrix_path,"rb"); if (generate && !f_INPUT) { generateInputMatrix(); } f_INPUT = fopen(input_matrix_path,"rb"); if (f_INPUT) { // open input successful size_t ret_value; for(i=0; i<k; i++) { ret_value = fread (&(INPUT[ k * i ]), sizeof( half )*k, 1, f_INPUT); if (ret_value != 1) { printf("Bad input formatting: %lu .\n", ret_value); #ifdef LOGS log_error_detail("Bad input formatting."); end_log_file(); #endif exit(EXIT_FAILURE); } } fclose(f_INPUT); } else { printf ("Cant open matrices and -generate is false.\n"); #ifdef LOGS log_error_detail("Cant open matrices"); end_log_file(); #endif exit(EXIT_FAILURE); } if (!generate) { size_t ret_value; f_GOLD = fopen(gold_matrix_path,"rb"); for(i=0; i<k; i++) { ret_value = fread (&(GOLD[ k * i ]), sizeof( half )*k, 1, f_GOLD); if (ret_value != 1) { printf("Bad gold formatting: %lu .\n", ret_value); #ifdef LOGS log_error_detail("Bad gold formatting."); end_log_file(); #endif exit(EXIT_FAILURE); } } fclose(f_GOLD); } if (verbose) printf("Done reading matrices in %.2fs\n", mysecond() - time); if (fault_injection) { INPUT[515] = half_float::half(0.2); printf("!! Injected 0.2 on position INPUT[515]\n"); } } // bool badass_memcmp(half_float::half *gold, half_float::half *found, unsigned long n){ // half_float::half result(0.0); // int i; // unsigned long chunk = ceil(double(n) / double(omp_get_max_threads())); // // printf("size %d max threads %d chunk %d\n", n, omp_get_max_threads(), chunk); // double time = mysecond(); // #pragma omp parallel for default(shared) private(i) schedule(static,chunk) reduction(+:result) // for (i=0; i < n; i++) // result = result + (gold[i] - found[i]); // // printf("comparing took %lf seconds, diff %lf\n", mysecond() - time, result); // if (fabs(result) > 0.0000000001) // return true; // return false; // } bool badass_memcmp(half_float::half *gold, half_float::half *found, unsigned long n) { bool flag = false; #pragma omp parallel for shared(flag) for (int i=0; i < n; i++) { if (found[i] != gold[i]) { flag = true; } } return flag; } void usage() { printf("Usage: hlud -size=N [-generate] [-input=<path>] [-gold=<path>] [-iterations=N] [-verbose] [-no-warmup]\n"); } int main( int argc, char* argv[] ) { //================== CUDA error handlers hipError_t mcpy; const char *erro; //==================================== //================== Test vars int i, j, loop2; // int kernel_errors=0; // int zero = 0; double time; double kernel_time, global_time; double total_kernel_time, min_kernel_time, max_kernel_time; int device_warmup = 1; // int gpu_check = 1; //==================================== //================== Read test parameters if (argc<2) { usage(); exit (-1); } if (checkCmdLineFlag(argc, (const char **)argv, "size")) { k = getCmdLineArgumentInt(argc, (const char **)argv, "size"); if ((k <= 0)||(k % 16 != 0)) { printf("Invalid input size given on the command-line: %d\n", k); exit(EXIT_FAILURE); } matrixSize = k * k; } else { usage(); exit(EXIT_FAILURE); } if (checkCmdLineFlag(argc, (const char **)argv, "input")) { getCmdLineArgumentString(argc, (const char **)argv, "input", &input_matrix_path); } else { input_matrix_path = new char[100]; snprintf(input_matrix_path, 100, "hlud_input_%i.matrix", (signed int)DEFAULT_INPUT_SIZE); printf("Using default input path: %s\n", input_matrix_path); } if (checkCmdLineFlag(argc, (const char **)argv, "gold")) { getCmdLineArgumentString(argc, (const char **)argv, "gold", &gold_matrix_path); } else { gold_matrix_path = new char[100]; 
snprintf(gold_matrix_path, 100, "hlud_gold_%i.matrix", (signed int)k); printf("Using default gold path: %s\n", gold_matrix_path); } if (checkCmdLineFlag(argc, (const char **)argv, "iterations")) { iterations = getCmdLineArgumentInt(argc, (const char **)argv, "iterations"); } if (checkCmdLineFlag(argc, (const char **)argv, "verbose")) { verbose = 1; } if (checkCmdLineFlag(argc, (const char **)argv, "debug")) { fault_injection = 1; printf("!! Will be injected an input error\n"); } if (checkCmdLineFlag(argc, (const char **)argv, "no-warmup")) { device_warmup = 0; printf("!! The first iteration may not reflect real timing information\n"); } if (checkCmdLineFlag(argc, (const char **)argv, "generate")) { generate = 1; device_warmup = 0; iterations = 1; printf("Will generate input if needed and GOLD.\nIterations setted to 1. no-warmup setted to false.\n"); } else { generate = 0; } if (checkCmdLineFlag(argc, (const char **) argv, "generator_debug")) { if (generate) { generator_debug = true; } else { printf("!! generator_debug ignored: generate is not activated. active with -generate.\n"); } } // if (checkCmdLineFlag(argc, (const char **)argv, "no-gpu-gold-check")) // { // gpu_check = 0; // } else { // printf("!! The gold check will happen on the GPU and fall back to CPU in case of errors\n"); // } //==================================== //================== Init logs #ifdef LOGS char test_info[90]; snprintf(test_info, 90, "size:%d type:half-precision", k); if (!generate) start_log_file("cudahlud", test_info); #endif //==================================== //================== Alloc HOST memory INPUT = ( half_float::half* ) malloc( matrixSize * sizeof( half ) ); GOLD = ( half_float::half* ) malloc( matrixSize * sizeof( half ) ); if (!(INPUT && GOLD)) { printf("Failed on host malloc.\n"); exit(-3); } //==================================== //================== Init test environment // kernel_errors=0; total_kernel_time = 0; min_kernel_time = UINT_MAX; max_kernel_time = 0; GetDevice(); ReadMatrixFromFile(); printf( "cudahlud\n" ); fflush(stdout); //==================================== //================== Init DEVICE memory allocCudaMemory(); copyCudaMemory(); //==================================== for(loop2=0; loop2<iterations; loop2++) {//================== Global test loop if (!loop2 && device_warmup) printf("First iteration: device warmup. Please wait...\n"); // Timer... global_time = mysecond(); hipMemset(d_OUTPUT, 0, matrixSize * sizeof( half )); if (verbose) printf(","); kernel_time = mysecond(); #ifdef LOGS if (loop2 || !device_warmup) if (!generate) start_iteration(); #endif //================== Device computation, HMxM lud_cuda(d_INPUT, k); checkCudaErrors( hipPeekAtLastError() ); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors( hipPeekAtLastError() ); //==================================== #ifdef LOGS if (loop2 || !device_warmup) if (!generate) end_iteration(); #endif kernel_time = mysecond() - kernel_time; if (loop2 || !device_warmup) { total_kernel_time += kernel_time; min_kernel_time = min(min_kernel_time, kernel_time); max_kernel_time = max(max_kernel_time, kernel_time); } if (loop2 || !device_warmup) if (verbose) printf("Device kernel time for iteration %d: %.3fs\n", loop2, kernel_time); if (verbose) printf(","); // Timer... 
time = mysecond(); //if (kernel_errors != 0) { checkCudaErrors( hipMemcpy(INPUT, d_OUTPUT, matrixSize * sizeof( half ), hipMemcpyDeviceToHost) ); if (generate) { writeGoldToFile(INPUT); } else if (loop2 || !device_warmup) { //~ if (memcmp(A, GOLD, sizeof(half_float::half) * k*k)) { if (badass_memcmp(GOLD, INPUT, matrixSize)) { char error_detail[150]; int host_errors = 0; printf("!"); #pragma omp parallel for for(i=0; (i<k); i++) { for(j=0; (j<k); j++) { if (INPUT[i + k * j] != GOLD[i + k * j]) #pragma omp critical { snprintf(error_detail, 150, "p: [%d, %d], r: %1.16e, e: %1.16e", i, j, (double)(INPUT[i + k * j]), (double)(GOLD[i + k * j])); if (verbose && (host_errors < 10)) printf("%s\n", error_detail); #ifdef LOGS if (!generate) log_error_detail(error_detail); #endif host_errors++; //ea++; //fprintf(file, "\n p: [%d, %d], r: %1.16e, e: %1.16e, error: %d\n", i, j, A[i + k * j], GOLD[i + k * j], t_ea); } } } // printf("numErrors:%d", host_errors); #ifdef LOGS if (!generate) log_error_count(host_errors); #endif //================== Release device memory to ensure there is no corrupted data on the inputs of the next iteration hipFree( d_INPUT ); hipFree( d_OUTPUT ); //==================================== ReadMatrixFromFile(); //================== Init DEVICE memory allocCudaMemory(); copyCudaMemory(); //==================================== } } //==================================== //================== Console hearthbeat /*if(kernel_errors > 0 || (loop2 % 10 == 0)) { printf("test number: %d\n", loop2); printf(" kernel time: %f\n", kernel_time); } else {*/ printf("."); fflush(stdout); //} //==================================== if (loop2 || !device_warmup) if (verbose) printf("Gold check time for iteration %d: %.3fs\n", loop2, mysecond() - time); if (loop2 || !device_warmup) if (verbose) { /////////// PERF double outputpersec = (double)matrixSize/kernel_time; printf("SIZE:%d OUTPUT/S:%f\n",k, outputpersec); /////////// } if (loop2 || !device_warmup) if (verbose) printf("Iteration #%d time: %.3fs\n\n\n", loop2, mysecond() - global_time); fflush(stdout); } double averageKernelTime = total_kernel_time / (iterations - (device_warmup ? 1 : 0)); printf("\n-- END --\n" "Total kernel time: %.3fs\n" "Iterations: %d\n" "Average kernel time: %.3fs (best: %.3fs ; worst: %.3fs)\n", total_kernel_time, iterations, averageKernelTime, min_kernel_time, max_kernel_time); //================== Release device memory hipFree( d_INPUT ); hipFree( d_OUTPUT ); //==================================== free( INPUT ); free( GOLD ); #ifdef LOGS if (!generate) end_log_file(); #endif return 0; }
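badass_memcmp() in the benchmark above has every OpenMP thread write the same shared flag when it finds a mismatch. A reduction that counts mismatches avoids those concurrent writes and also reports how many elements differ; the sketch below is a standalone, plain C++/OpenMP illustration with hypothetical gold/found arrays, not a drop-in replacement for the benchmark's checker.

#include <cstdio>

int main() {
  const long n = 1L << 20;
  static float gold[1 << 20], found[1 << 20];
  for (long i = 0; i < n; ++i) {
    gold[i] = 1.0f;
    found[i] = (i == 7) ? 2.0f : 1.0f;  // plant one deliberate mismatch
  }
  long mismatches = 0;
  // Each thread accumulates privately; OpenMP combines the partial counts at the end.
  #pragma omp parallel for reduction(+ : mismatches)
  for (long i = 0; i < n; ++i)
    if (found[i] != gold[i]) ++mismatches;
  printf("mismatches: %ld\n", mismatches);
  return 0;
}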
4f4d6cd133101fee7de127e27663f33c7ca04a7c.cu
/* * ===================================================================================== * * Filename: lud.cu * * Description: The main wrapper for the suite * * Version: 1.0 * Created: 10/22/2009 08:40:34 PM * Revision: none * Compiler: gcc * * Author: Liang Wang (lw2aw), [email protected] * Company: CS@UVa * * ===================================================================================== */ // CAROL-RADIATION radiation benchmark implementation - <caio.b.lunardi at gmail.com> - 2018 #include <cuda.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> #include <random> #include <cuda_fp16.h> #include "half.hpp" // helper functions #include "helper_string.h" #include "helper_cuda.h" #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #ifdef LOGS #include "log_helper.h" #endif #include "hlud_kernel.cu" #define DEFAULT_INPUT_SIZE 8192 int verbose = 0; int fault_injection = 0; int k=0; // k x k matrix size int matrixSize=0; // = k * k matrix size int iterations=100000000; // global loop iteracion bool generate=false; bool generator_debug=false; #define GENERATOR_MAXABSVALUE 2.0 #define GENERATOR_MINABSVALUE 0 typedef half tested_type; typedef half_float::half tested_type_host; //================== Input paths char *gold_matrix_path, *input_matrix_path; FILE* f_INPUT; FILE* f_B; FILE* f_GOLD; //==================================== //================== Host and device matrix ptr's half_float::half *INPUT; half_float::half *B; half_float::half *GOLD; half *d_INPUT; half *d_OUTPUT; //==================================== void GetDevice(){ //================== Retrieve and set the default CUDA device cudaDeviceProp prop; cudaError_t teste; int count=0; teste = cudaGetDeviceCount(&count); printf("\nGet Device Test: %s\n", cudaGetErrorString(teste)); for (int i=0; i< count; i++) { cudaGetDeviceProperties( &prop, i ); printf( "Name: %s\n", prop.name ); } int *ndevice; int dev = 0; ndevice = &dev; cudaGetDevice(ndevice); cudaSetDevice(0); cudaGetDeviceProperties( &prop, 0 ); printf("\ndevice: %d %s\n", *ndevice, prop.name); } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } void allocCudaMemory() { //================== CUDA error handlers cudaError_t malloc; const char *erro; //==================================== malloc = cudaMalloc( ( void** ) &d_INPUT, matrixSize * sizeof( half ) ); erro = cudaGetErrorString(malloc); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error input"); end_log_file(); #endif exit(EXIT_FAILURE); } //mem allocate failure malloc = cudaMalloc( ( void** ) &d_OUTPUT, matrixSize * sizeof( half ) ); erro = cudaGetErrorString(malloc); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error output"); end_log_file(); #endif exit(EXIT_FAILURE);} //mem allocate failure } void copyCudaMemory() { //================== CUDA error handlers cudaError_t mcpy; const char *erro; //==================================== mcpy = cudaMemset(d_OUTPUT, 0, matrixSize * sizeof( half )); erro = cudaGetErrorString(mcpy); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error gpu output load memset"); end_log_file(); #endif exit(EXIT_FAILURE);} //mem allocate failure mcpy = cudaMemcpy( d_INPUT, INPUT, 
matrixSize * sizeof( half ), cudaMemcpyHostToDevice ); // PUSH A erro = cudaGetErrorString(mcpy); if(strcmp(erro, "no error") != 0) { #ifdef LOGS if (!generate) log_error_detail("error gpu load input"); end_log_file(); #endif exit(EXIT_FAILURE);} //mem allocate failure } void generateInputMatrix() { FILE *f_INPUT; half_float::half *h_INPUT; if (k==DEFAULT_INPUT_SIZE) { h_INPUT = INPUT; } else { h_INPUT = (tested_type_host*) malloc(DEFAULT_INPUT_SIZE * DEFAULT_INPUT_SIZE * sizeof(tested_type)); if (!h_INPUT) { printf("Could not alloc h_INPUT\n"); exit(EXIT_FAILURE); } } std::random_device rd; //Will be used to obtain a seed for the random number engine std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd() std::uniform_real_distribution<double> dis(-GENERATOR_MAXABSVALUE, GENERATOR_MAXABSVALUE); if (!generator_debug) { for (int i=0; i<DEFAULT_INPUT_SIZE; i++) { for (int j=0; j<DEFAULT_INPUT_SIZE; j++) { h_INPUT[i * DEFAULT_INPUT_SIZE + j] = (tested_type_host)dis(gen); } } } else { for (int i=0; i<DEFAULT_INPUT_SIZE; i++) { for (int j=0; j<DEFAULT_INPUT_SIZE; j++) { h_INPUT[i * DEFAULT_INPUT_SIZE + j] = (tested_type_host)2.0; } } } if (h_INPUT != INPUT) { memcpy(INPUT, h_INPUT, matrixSize * sizeof(tested_type)); } int numZeros; int numNans; int numInfs; // printf("Write\n"); f_INPUT = fopen(input_matrix_path, "wb"); if (!f_INPUT) { printf("Could not open f_INPUT\n"); exit(EXIT_FAILURE); } tested_type_host val; numZeros = 0; numNans = 0; numInfs = 0; for (int i = 0; i<DEFAULT_INPUT_SIZE*DEFAULT_INPUT_SIZE; i++) { val=h_INPUT[i]; if (val == 0) numZeros++; if (isnan(val)) numNans++; if (isinf(val)) numInfs++; } printf("Number of zeros/NaNs/INFs on matrix INPUT: %d/%d/%d\n", numZeros, numNans, numInfs); for(int i=0; i<DEFAULT_INPUT_SIZE; i++) { fwrite(&(h_INPUT[i * DEFAULT_INPUT_SIZE]), sizeof(tested_type) * DEFAULT_INPUT_SIZE, 1, f_INPUT); } printf("Element 32 of matrix A: %f\n", (double)INPUT[32]); printf("Done\n"); fclose(f_INPUT); if (h_INPUT != INPUT) { free(h_INPUT); } return; } void writeGoldToFile(half_float::half *m) { if (!(f_GOLD = fopen(gold_matrix_path, "wb"))) { printf("Error: Could not open gold file in wb mode. %s\n", gold_matrix_path); exit(EXIT_FAILURE); } else { size_t ret_value = 0; for (int i = 0; i < k; i++) { ret_value = fwrite(&(m[i * k]), k * sizeof( half ), 1, f_GOLD); if (ret_value != 1) { printf("Failure writing to gold: %d\n", ret_value); exit(EXIT_FAILURE); } } fclose(f_GOLD); } } void ReadMatrixFromFile(){ //================== Read inputs to HOST memory int i; if (verbose) printf("Reading matrices... 
"); double time = mysecond(); f_INPUT = fopen(input_matrix_path,"rb"); if (generate && !f_INPUT) { generateInputMatrix(); } f_INPUT = fopen(input_matrix_path,"rb"); if (f_INPUT) { // open input successful size_t ret_value; for(i=0; i<k; i++) { ret_value = fread (&(INPUT[ k * i ]), sizeof( half )*k, 1, f_INPUT); if (ret_value != 1) { printf("Bad input formatting: %lu .\n", ret_value); #ifdef LOGS log_error_detail("Bad input formatting."); end_log_file(); #endif exit(EXIT_FAILURE); } } fclose(f_INPUT); } else { printf ("Cant open matrices and -generate is false.\n"); #ifdef LOGS log_error_detail("Cant open matrices"); end_log_file(); #endif exit(EXIT_FAILURE); } if (!generate) { size_t ret_value; f_GOLD = fopen(gold_matrix_path,"rb"); for(i=0; i<k; i++) { ret_value = fread (&(GOLD[ k * i ]), sizeof( half )*k, 1, f_GOLD); if (ret_value != 1) { printf("Bad gold formatting: %lu .\n", ret_value); #ifdef LOGS log_error_detail("Bad gold formatting."); end_log_file(); #endif exit(EXIT_FAILURE); } } fclose(f_GOLD); } if (verbose) printf("Done reading matrices in %.2fs\n", mysecond() - time); if (fault_injection) { INPUT[515] = half_float::half(0.2); printf("!! Injected 0.2 on position INPUT[515]\n"); } } // bool badass_memcmp(half_float::half *gold, half_float::half *found, unsigned long n){ // half_float::half result(0.0); // int i; // unsigned long chunk = ceil(double(n) / double(omp_get_max_threads())); // // printf("size %d max threads %d chunk %d\n", n, omp_get_max_threads(), chunk); // double time = mysecond(); // #pragma omp parallel for default(shared) private(i) schedule(static,chunk) reduction(+:result) // for (i=0; i < n; i++) // result = result + (gold[i] - found[i]); // // printf("comparing took %lf seconds, diff %lf\n", mysecond() - time, result); // if (fabs(result) > 0.0000000001) // return true; // return false; // } bool badass_memcmp(half_float::half *gold, half_float::half *found, unsigned long n) { bool flag = false; #pragma omp parallel for shared(flag) for (int i=0; i < n; i++) { if (found[i] != gold[i]) { flag = true; } } return flag; } void usage() { printf("Usage: hlud -size=N [-generate] [-input=<path>] [-gold=<path>] [-iterations=N] [-verbose] [-no-warmup]\n"); } int main( int argc, char* argv[] ) { //================== CUDA error handlers cudaError_t mcpy; const char *erro; //==================================== //================== Test vars int i, j, loop2; // int kernel_errors=0; // int zero = 0; double time; double kernel_time, global_time; double total_kernel_time, min_kernel_time, max_kernel_time; int device_warmup = 1; // int gpu_check = 1; //==================================== //================== Read test parameters if (argc<2) { usage(); exit (-1); } if (checkCmdLineFlag(argc, (const char **)argv, "size")) { k = getCmdLineArgumentInt(argc, (const char **)argv, "size"); if ((k <= 0)||(k % 16 != 0)) { printf("Invalid input size given on the command-line: %d\n", k); exit(EXIT_FAILURE); } matrixSize = k * k; } else { usage(); exit(EXIT_FAILURE); } if (checkCmdLineFlag(argc, (const char **)argv, "input")) { getCmdLineArgumentString(argc, (const char **)argv, "input", &input_matrix_path); } else { input_matrix_path = new char[100]; snprintf(input_matrix_path, 100, "hlud_input_%i.matrix", (signed int)DEFAULT_INPUT_SIZE); printf("Using default input path: %s\n", input_matrix_path); } if (checkCmdLineFlag(argc, (const char **)argv, "gold")) { getCmdLineArgumentString(argc, (const char **)argv, "gold", &gold_matrix_path); } else { gold_matrix_path = new char[100]; 
snprintf(gold_matrix_path, 100, "hlud_gold_%i.matrix", (signed int)k); printf("Using default gold path: %s\n", gold_matrix_path); } if (checkCmdLineFlag(argc, (const char **)argv, "iterations")) { iterations = getCmdLineArgumentInt(argc, (const char **)argv, "iterations"); } if (checkCmdLineFlag(argc, (const char **)argv, "verbose")) { verbose = 1; } if (checkCmdLineFlag(argc, (const char **)argv, "debug")) { fault_injection = 1; printf("!! Will be injected an input error\n"); } if (checkCmdLineFlag(argc, (const char **)argv, "no-warmup")) { device_warmup = 0; printf("!! The first iteration may not reflect real timing information\n"); } if (checkCmdLineFlag(argc, (const char **)argv, "generate")) { generate = 1; device_warmup = 0; iterations = 1; printf("Will generate input if needed and GOLD.\nIterations setted to 1. no-warmup setted to false.\n"); } else { generate = 0; } if (checkCmdLineFlag(argc, (const char **) argv, "generator_debug")) { if (generate) { generator_debug = true; } else { printf("!! generator_debug ignored: generate is not activated. active with -generate.\n"); } } // if (checkCmdLineFlag(argc, (const char **)argv, "no-gpu-gold-check")) // { // gpu_check = 0; // } else { // printf("!! The gold check will happen on the GPU and fall back to CPU in case of errors\n"); // } //==================================== //================== Init logs #ifdef LOGS char test_info[90]; snprintf(test_info, 90, "size:%d type:half-precision", k); if (!generate) start_log_file("cudahlud", test_info); #endif //==================================== //================== Alloc HOST memory INPUT = ( half_float::half* ) malloc( matrixSize * sizeof( half ) ); GOLD = ( half_float::half* ) malloc( matrixSize * sizeof( half ) ); if (!(INPUT && GOLD)) { printf("Failed on host malloc.\n"); exit(-3); } //==================================== //================== Init test environment // kernel_errors=0; total_kernel_time = 0; min_kernel_time = UINT_MAX; max_kernel_time = 0; GetDevice(); ReadMatrixFromFile(); printf( "cudahlud\n" ); fflush(stdout); //==================================== //================== Init DEVICE memory allocCudaMemory(); copyCudaMemory(); //==================================== for(loop2=0; loop2<iterations; loop2++) {//================== Global test loop if (!loop2 && device_warmup) printf("First iteration: device warmup. Please wait...\n"); // Timer... global_time = mysecond(); cudaMemset(d_OUTPUT, 0, matrixSize * sizeof( half )); if (verbose) printf(","); kernel_time = mysecond(); #ifdef LOGS if (loop2 || !device_warmup) if (!generate) start_iteration(); #endif //================== Device computation, HMxM lud_cuda(d_INPUT, k); checkCudaErrors( cudaPeekAtLastError() ); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors( cudaPeekAtLastError() ); //==================================== #ifdef LOGS if (loop2 || !device_warmup) if (!generate) end_iteration(); #endif kernel_time = mysecond() - kernel_time; if (loop2 || !device_warmup) { total_kernel_time += kernel_time; min_kernel_time = min(min_kernel_time, kernel_time); max_kernel_time = max(max_kernel_time, kernel_time); } if (loop2 || !device_warmup) if (verbose) printf("Device kernel time for iteration %d: %.3fs\n", loop2, kernel_time); if (verbose) printf(","); // Timer... 
time = mysecond(); //if (kernel_errors != 0) { checkCudaErrors( cudaMemcpy(INPUT, d_OUTPUT, matrixSize * sizeof( half ), cudaMemcpyDeviceToHost) ); if (generate) { writeGoldToFile(INPUT); } else if (loop2 || !device_warmup) { //~ if (memcmp(A, GOLD, sizeof(half_float::half) * k*k)) { if (badass_memcmp(GOLD, INPUT, matrixSize)) { char error_detail[150]; int host_errors = 0; printf("!"); #pragma omp parallel for for(i=0; (i<k); i++) { for(j=0; (j<k); j++) { if (INPUT[i + k * j] != GOLD[i + k * j]) #pragma omp critical { snprintf(error_detail, 150, "p: [%d, %d], r: %1.16e, e: %1.16e", i, j, (double)(INPUT[i + k * j]), (double)(GOLD[i + k * j])); if (verbose && (host_errors < 10)) printf("%s\n", error_detail); #ifdef LOGS if (!generate) log_error_detail(error_detail); #endif host_errors++; //ea++; //fprintf(file, "\n p: [%d, %d], r: %1.16e, e: %1.16e, error: %d\n", i, j, A[i + k * j], GOLD[i + k * j], t_ea); } } } // printf("numErrors:%d", host_errors); #ifdef LOGS if (!generate) log_error_count(host_errors); #endif //================== Release device memory to ensure there is no corrupted data on the inputs of the next iteration cudaFree( d_INPUT ); cudaFree( d_OUTPUT ); //==================================== ReadMatrixFromFile(); //================== Init DEVICE memory allocCudaMemory(); copyCudaMemory(); //==================================== } } //==================================== //================== Console hearthbeat /*if(kernel_errors > 0 || (loop2 % 10 == 0)) { printf("test number: %d\n", loop2); printf(" kernel time: %f\n", kernel_time); } else {*/ printf("."); fflush(stdout); //} //==================================== if (loop2 || !device_warmup) if (verbose) printf("Gold check time for iteration %d: %.3fs\n", loop2, mysecond() - time); if (loop2 || !device_warmup) if (verbose) { /////////// PERF double outputpersec = (double)matrixSize/kernel_time; printf("SIZE:%d OUTPUT/S:%f\n",k, outputpersec); /////////// } if (loop2 || !device_warmup) if (verbose) printf("Iteration #%d time: %.3fs\n\n\n", loop2, mysecond() - global_time); fflush(stdout); } double averageKernelTime = total_kernel_time / (iterations - (device_warmup ? 1 : 0)); printf("\n-- END --\n" "Total kernel time: %.3fs\n" "Iterations: %d\n" "Average kernel time: %.3fs (best: %.3fs ; worst: %.3fs)\n", total_kernel_time, iterations, averageKernelTime, min_kernel_time, max_kernel_time); //================== Release device memory cudaFree( d_INPUT ); cudaFree( d_OUTPUT ); //==================================== free( INPUT ); free( GOLD ); #ifdef LOGS if (!generate) end_log_file(); #endif return 0; }
d18edaf0d171a55686989e520f8d4396f439f5bd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

// CUDA Kernel function to add the elements of two arrays on the GPU
__global__ void add(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) {
        y[i] = x[i] + y[i];
    }
}

int main(int argc, char** argv)
{
    int N = 1<<20; // 1M elements

    // Allocate Unified Memory -- accessible from CPU or GPU
    float *x, *y;
    hipMallocManaged(&x, N*sizeof(float));
    hipMallocManaged(&y, N*sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);

    // Wait for GPU to finish before accessing on host
    hipDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    hipFree(x);
    hipFree(y);

    return 0;
}
d18edaf0d171a55686989e520f8d4396f439f5bd.cu
#include <iostream>
#include <math.h>

// CUDA Kernel function to add the elements of two arrays on the GPU
__global__ void add(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) {
        y[i] = x[i] + y[i];
    }
}

int main(int argc, char** argv)
{
    int N = 1<<20; // 1M elements

    // Allocate Unified Memory -- accessible from CPU or GPU
    float *x, *y;
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
1bdcedd4ad8fa802cd6e13b7af832f5e304bbbcb.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "multiplyMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            float *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE);
            float *c = NULL;
            hipMalloc(&c, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( multiplyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( multiplyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( multiplyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
1bdcedd4ad8fa802cd6e13b7af832f5e304bbbcb.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "multiplyMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            float *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE);
            float *c = NULL;
            cudaMalloc(&c, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            multiplyMatrix<<<gridBlock,threadBlock>>>(a,b,c);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                multiplyMatrix<<<gridBlock,threadBlock>>>(a,b,c);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                multiplyMatrix<<<gridBlock,threadBlock>>>(a,b,c);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
05f0eb66b36421da078671f4bf7d19440a68bcf4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "separableconv_cuda_kernel.cuh"

#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>

#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)

#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif

using at::Half;

//forward path of our layer
template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_forward_kernelfunc(
    const int nElement,
    const int w, const int h, const int channel, const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3, scalar_t* output
)
{
    //blockIdx.z : batch index from 0~B-1
    //blockIdx.y : height patch index from ceil(h/16)
    //blockIdx.x : width patch index from ceil(w/32)
    //threadidx.x: width index 0~31
    //threadIdx.y: height index 0~15
    //threadIdx.z: Not used

    //only use one dimensioon of the grid and block
    const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
    const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
    const bool withinXbounds = w_i < w - filter_size + 1;
    const bool withinYbounds = h_i < h - filter_size + 1;
    const int batch_i = blockIdx.z;

    // __syncthreads();
    // const float fillvalue =0.0f;

    if( withinXbounds && withinYbounds) {
        for ( int c_i = 0 ; c_i < channel ; c_i ++){
            float out = 0.0f;
            for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
                for (int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
                    float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
                    float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
                    float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
                    out += temp1* temp2 * temp3;
                }
            }
            output[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ] = out;
        }
    }
    return ;
}

template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_backward_kernelfunc(
    const int nElement, const int w, const int h, const int channel, const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3,
    const scalar_t* __restrict__ gradoutput,
    scalar_t* gradinput1, scalar_t* gradinput2, scalar_t* gradinput3
)
{
    //blockIdx.z : batch index from 0~B-1
    //blockIdx.y : height patch index from ceil(h/16)
    //blockIdx.x : width patch index from ceil(w/32)
    //threadidx.x: width index 0~31
    //threadIdx.y: height index 0~15
    //threadIdx.z: Not used

    const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
    const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
    const bool withinXbounds = w_i < w - filter_size + 1;
    const bool withinYbounds = h_i < h - filter_size + 1;
    const int batch_i = blockIdx.z;

    if(withinXbounds && withinYbounds){
        for (int c_i = 0 ; c_i < channel ; c_i ++){
            for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
                for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
                    float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
                    float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
                    float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
                    float gradout = gradoutput[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ];

                    atomicAdd(&gradinput1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)], gradout * temp2 * temp3);
                    atomicAdd(&gradinput2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ], gradout * temp1 * temp3);
                    atomicAdd(&gradinput3 [batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ] , gradout * temp1 * temp2);
                }
            }
        }
    }
    return ;
}

int SeparableConvLayer_gpu_forward_kernel(
    hipStream_t stream,
    const int nElement,
    const int w, const int h, const int channel, const int batch,const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    at::Tensor& input1,
    at::Tensor& input2,
    at::Tensor& input3,
    at::Tensor& output
)
{
    int error = 1 ;
    dim3 grid;
    dim3 block;

    // blockthread = 128;
    //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processsed in one kernel
    block = dim3(BLOCKDIMX,BLOCKDIMY,1);
    grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1 + BLOCKDIMY - 1) / BLOCKDIMY, batch);
    if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
        printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);

    //extract the data of CudaTensor and use kernel to calculate.
    AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
        hipLaunchKernelGGL(( SeparableConvLayer_gpu_forward_kernelfunc), dim3(grid),dim3(block),0, stream ,
            nElement, //to let the nummous
            w,h,channel, filter_size,
            input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
            input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
            input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
            output_b_stride,output_c_stride,output_h_stride,output_w_stride,
            input1.data<scalar_t>(),input2.data<scalar_t>(),input3.data<scalar_t>(),
            output.data<scalar_t>()
        );
    }));

    // THCudaCheck(hipGetLastError());
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
        //THError("aborting");
        return error;
    }

    error = 0;
    return error;
}

int SeparableConvLayer_gpu_backward_kernel(
    hipStream_t stream,
    const int nElement,
    const int w, const int h, const int channel, const int batch, const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    at::Tensor& input1,
    at::Tensor& input2,
    at::Tensor& input3,
    at::Tensor& gradoutput,
    at::Tensor& gradinput1,
    at::Tensor& gradinput2,
    at::Tensor& gradinput3
)
{
    int error = 1 ;
    dim3 grid;
    dim3 block;

    //blockthread = 128;
    //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processsed in one kernel
    block = dim3(BLOCKDIMX,BLOCKDIMY,1);
    grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1+ BLOCKDIMY - 1) / BLOCKDIMY, batch);
    if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
        printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);

    // hipMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float));
    // hipMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float));
    // hipMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float));

    AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
        hipLaunchKernelGGL(( SeparableConvLayer_gpu_backward_kernelfunc) , dim3(grid),dim3(block),0, stream,
            nElement, //to let the nummous
            w,h,channel, filter_size,
            input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
            input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
            input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
            output_b_stride,output_c_stride,output_h_stride,output_w_stride,
            input1.data<scalar_t>(),
            input2.data<scalar_t>(),
            input3.data<scalar_t>(),
            gradoutput.data<scalar_t>(),
            gradinput1.data<scalar_t>(),
            gradinput2.data<scalar_t>(),
            gradinput3.data<scalar_t>()
        );
    }));

    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        printf("gpuerror in BilinearSampler.updateGradInput %s\n", hipGetErrorString(err));
        //THError("aborting");
        return error;
    }

    error = 0;
    return error;
}
05f0eb66b36421da078671f4bf7d19440a68bcf4.cu
#include <stdio.h>
#include "separableconv_cuda_kernel.cuh"

#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>

#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)

#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif

using at::Half;

//forward path of our layer
template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_forward_kernelfunc(
    const int nElement,
    const int w, const int h, const int channel, const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3, scalar_t* output
)
{
    //blockIdx.z : batch index from 0~B-1
    //blockIdx.y : height patch index from ceil(h/16)
    //blockIdx.x : width patch index from ceil(w/32)
    //threadidx.x: width index 0~31
    //threadIdx.y: height index 0~15
    //threadIdx.z: Not used

    //only use one dimensioon of the grid and block
    const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
    const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
    const bool withinXbounds = w_i < w - filter_size + 1;
    const bool withinYbounds = h_i < h - filter_size + 1;
    const int batch_i = blockIdx.z;

    // __syncthreads();
    // const float fillvalue =0.0f;

    if( withinXbounds && withinYbounds) {
        for ( int c_i = 0 ; c_i < channel ; c_i ++){
            float out = 0.0f;
            for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
                for (int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
                    float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
                    float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
                    float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
                    out += temp1* temp2 * temp3;
                }
            }
            output[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ] = out;
        }
    }
    return ;
}

template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_backward_kernelfunc(
    const int nElement, const int w, const int h, const int channel, const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3,
    const scalar_t* __restrict__ gradoutput,
    scalar_t* gradinput1, scalar_t* gradinput2, scalar_t* gradinput3
)
{
    //blockIdx.z : batch index from 0~B-1
    //blockIdx.y : height patch index from ceil(h/16)
    //blockIdx.x : width patch index from ceil(w/32)
    //threadidx.x: width index 0~31
    //threadIdx.y: height index 0~15
    //threadIdx.z: Not used

    const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
    const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
    const bool withinXbounds = w_i < w - filter_size + 1;
    const bool withinYbounds = h_i < h - filter_size + 1;
    const int batch_i = blockIdx.z;

    if(withinXbounds && withinYbounds){
        for (int c_i = 0 ; c_i < channel ; c_i ++){
            for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
                for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
                    float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
                    float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
                    float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
                    float gradout = gradoutput[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ];

                    atomicAdd(&gradinput1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)], gradout * temp2 * temp3);
                    atomicAdd(&gradinput2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ], gradout * temp1 * temp3);
                    atomicAdd(&gradinput3 [batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ] , gradout * temp1 * temp2);
                }
            }
        }
    }
    return ;
}

int SeparableConvLayer_gpu_forward_kernel(
    cudaStream_t stream,
    const int nElement,
    const int w, const int h, const int channel, const int batch,const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    at::Tensor& input1,
    at::Tensor& input2,
    at::Tensor& input3,
    at::Tensor& output
)
{
    int error = 1 ;
    dim3 grid;
    dim3 block;

    // blockthread = 128;
    //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processsed in one kernel
    block = dim3(BLOCKDIMX,BLOCKDIMY,1);
    grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1 + BLOCKDIMY - 1) / BLOCKDIMY, batch);
    if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
        printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);

    //extract the data of CudaTensor and use kernel to calculate.
    AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
        SeparableConvLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>(
            nElement, //to let the nummous
            w,h,channel, filter_size,
            input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
            input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
            input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
            output_b_stride,output_c_stride,output_h_stride,output_w_stride,
            input1.data<scalar_t>(),input2.data<scalar_t>(),input3.data<scalar_t>(),
            output.data<scalar_t>()
        );
    }));

    // THCudaCheck(cudaGetLastError());
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
        //THError("aborting");
        return error;
    }

    error = 0;
    return error;
}

int SeparableConvLayer_gpu_backward_kernel(
    cudaStream_t stream,
    const int nElement,
    const int w, const int h, const int channel, const int batch, const int filter_size,
    const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
    const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
    const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
    const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
    at::Tensor& input1,
    at::Tensor& input2,
    at::Tensor& input3,
    at::Tensor& gradoutput,
    at::Tensor& gradinput1,
    at::Tensor& gradinput2,
    at::Tensor& gradinput3
)
{
    int error = 1 ;
    dim3 grid;
    dim3 block;

    //blockthread = 128;
    //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z
    //the three channels are processsed in one kernel
    block = dim3(BLOCKDIMX,BLOCKDIMY,1);
    grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1+ BLOCKDIMY - 1) / BLOCKDIMY, batch);
    if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
        printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);

    // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float));
    // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float));
    // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float));

    AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
        SeparableConvLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>(
            nElement, //to let the nummous
            w,h,channel, filter_size,
            input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
            input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
            input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
            output_b_stride,output_c_stride,output_h_stride,output_w_stride,
            input1.data<scalar_t>(),
            input2.data<scalar_t>(),
            input3.data<scalar_t>(),
            gradoutput.data<scalar_t>(),
            gradinput1.data<scalar_t>(),
            gradinput2.data<scalar_t>(),
            gradinput3.data<scalar_t>()
        );
    }));

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err));
        //THError("aborting");
        return error;
    }

    error = 0;
    return error;
}
9cf41573242dbd59a4eaf02197e78d3fea80f146.hip
// !!! This is a file automatically generated by hipify!!! #include "svd.h" #include "based.h" void basedtsvd(float* t,const int m,const int n,const int tupe,float* U,float* S,float* V){ int bat = m*n; hipfftComplex* t_f = (hipfftComplex*)malloc(bat*tupe*sizeof(hipfftComplex)); //transform for(int i=0;i<bat;i++){ for(int j=0;j<tupe;j++){ t_f[i*tupe+j].x=t[j*bat+i]; t_f[i*tupe+j].y=0; } } //tfft:C2C hipfftComplex* d_fftData; hipMalloc((void**)&d_fftData,tupe*bat*sizeof(hipfftComplex)); hipMemcpy(d_fftData,t_f,bat*tupe*sizeof(hipfftComplex),hipMemcpyHostToDevice); hipfftHandle plan; if(hipfftPlan1d(&plan,tupe,HIPFFT_C2C,1) != HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] hipfftPlan1d failed!",__FUNCTION__,__LINE__); return; } if(hipDeviceSynchronize() != hipSuccess){ fprintf(stdout,"[%s]:[%d] cuda syncthronize err!",__FUNCTION__,__LINE__); return; } for(int i=0;i<bat;i++){ if(hipfftExecC2C(plan,d_fftData+i*tupe,d_fftData+i*tupe,HIPFFT_FORWARD) != HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] hipfftExecC2C failed!",__FUNCTION__,__LINE__); return; } } //transform hipMemcpy(t_f,d_fftData,sizeof(hipfftComplex)*bat*tupe,hipMemcpyDeviceToHost); hipfftComplex* t_f2 = (hipfftComplex*)malloc(sizeof(hipfftComplex)*tupe*bat); for(int i=0;i<bat;i++){ for(int j=0;j<tupe;j++){ t_f2[j*bat+i]=t_f[i*tupe+j]; } } /*printf("\n============================\n"); for(int i=0;i<bat*tupe;i++){ printf("[%f %f]",t_f2[i].x,t_f2[i].y); } printf("\n============================\n"); */ hipMemcpy(d_fftData,t_f2,sizeof(hipfftComplex)*bat*tupe,hipMemcpyHostToDevice); if(hipfftDestroy(plan)!=HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] hipfftDestroy failed!",__FUNCTION__,__LINE__); return; } if(t_f != NULL){ free(t_f); t_f = NULL; } if(t_f2 !=NULL){ free(t_f2); t_f2 = NULL; } //tsvd hipsolverDnHandle_t handle; hipsolverGesvdjInfo_t params; int* info = NULL; int echo = 1; int lda = m; int ldu = m; int ldv = n; int lwork = 0; hipComplex* work=NULL; //malloc u s v float* d_s = NULL; hipComplex* d_u = NULL; hipComplex* d_v = NULL; hipMalloc((void**)&d_s,sizeof(float)*tupe*((m<n)?m:n)); hipMalloc((void**)&d_u,sizeof(hipComplex)*tupe*m*((m<n)?m:n)); hipMalloc((void**)&d_v,sizeof(hipComplex)*tupe*n*((m<n)?m:n)); hipMalloc((void**)&info,sizeof(int)); if(hipsolverDnCreate(&handle) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] hipsolverDnCreate failed!",__FUNCTION__,__LINE__); return; } if(hipsolverDnCreateGesvdjInfo(&params) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:creation svd info srror",__FUNCTION__,__LINE__); return; } if(hipsolverDnCgesvdj_bufferSize( handle, HIPSOLVER_EIG_MODE_VECTOR, echo, m, n, d_fftData, m, d_s, d_u, ldu, d_v, ldv, &lwork, params) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__); return; } if(hipDeviceSynchronize() != hipSuccess){ fprintf(stdout,"[%s]:[%d] cuda syncthronize err!",__FUNCTION__,__LINE__); return; } hipMalloc((void**)&work,sizeof(hipComplex)*lwork); int step_d = m*n; int step_u = m*((m<n)?m:n); int step_s = ((m<n)?m:n); int step_v = n*((m<n)?m:n); for(int i=0;i<tupe;i++){ if(hipsolverDnCgesvdj( handle, HIPSOLVER_EIG_MODE_VECTOR, echo, m, n, d_fftData+step_d*i, lda, d_s+i*step_s, d_u+i*step_u, ldu, d_v+i*step_v, ldv, work, lwork, info, params) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:hipsolverDnCgesvdj failed!",__FUNCTION__,__LINE__); return; } } if(hipDeviceSynchronize() != hipSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; 
} if(hipsolverDnDestroy(handle)!=CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] hipsolverDnDestroy failed!",__FUNCTION__,__LINE__); return; } if(hipsolverDnDestroyGesvdjInfo(params)!=CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] hipsolverDnDestroy failed!",__FUNCTION__,__LINE__); return; } if(d_fftData != NULL){ hipFree(d_fftData); d_fftData = NULL; } if(work != NULL){ hipFree(work); work = NULL; } if(info != NULL){ hipFree(info); info = NULL; } //ifft //transform hipComplex* h_u = (hipComplex*)malloc(sizeof(hipComplex)*tupe*step_u); hipComplex* h_u2 = (hipComplex*)malloc(sizeof(hipComplex)*tupe*step_u); hipComplex* h_v = (hipComplex*)malloc(sizeof(hipComplex)*tupe*step_v); hipComplex* h_v2 = (hipComplex*)malloc(sizeof(hipComplex)*tupe*step_v); hipComplex* h_s = (hipComplex*)malloc(sizeof(hipComplex)*tupe*step_s); float* h_s2 = (float*)malloc(sizeof(float)*tupe*step_s); hipComplex* d_s2; hipMalloc((void**)&d_s2,sizeof(hipComplex)*tupe*step_s); hipMemcpy(h_u2,d_u,sizeof(hipComplex)*tupe*step_u,hipMemcpyDeviceToHost); hipMemcpy(h_v2,d_v,sizeof(hipComplex)*tupe*step_v,hipMemcpyDeviceToHost); hipMemcpy(h_s2,d_s,sizeof(float)*tupe*step_s,hipMemcpyDeviceToHost); /*printf("\n============================\n"); for(int i=0;i<tupe*step_s;i++){ printf("[%f ]",h_s2[i]); } printf("\n============================\n"); */ //transform_u for(int i=0;i<step_u;i++){ for(int j=0;j<tupe;j++){ h_u[i*tupe+j]=h_u2[j*step_u+i]; } } //transform_v for(int i=0;i<step_v;i++){ for(int j=0;j<tupe;j++){ h_v[i*tupe+j]=h_v2[j*step_v+i]; } } //transform_s for(int i=0;i<step_s;i++){ for(int j=0;j<tupe;j++){ h_s[i*tupe+j].x=h_s2[j*step_s+i]; h_s[i*tupe+j].y=0; } } /* for(int i=0;i<tupe*step_s;i++){ printf("%f ",h_s2[i]); } printf("\n"); */ hipMemcpy(d_u,h_u,sizeof(hipComplex)*tupe*step_u,hipMemcpyHostToDevice); hipMemcpy(d_s2,h_s,sizeof(hipComplex)*tupe*step_s,hipMemcpyHostToDevice); hipMemcpy(d_v,h_v,sizeof(hipComplex)*tupe*step_v,hipMemcpyHostToDevice); if(h_u2 != NULL){ free(h_u2); h_u2 = NULL; } if(h_v2 != NULL){ free(h_v2); h_v2 = NULL; } if(h_s2 != NULL){ free(h_s2); h_s2= NULL; } if(d_s != NULL){ hipFree(d_s); d_s = NULL; } hipfftHandle iplan; if(hipfftPlan1d(&iplan,tupe,HIPFFT_C2C,1) != HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR: hipfftPlan1d failed!",__FUNCTION__,__LINE__); return; } if(hipDeviceSynchronize() != hipSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; } //ifft_u for(int i=0;i<step_u;i++){ if(hipfftExecC2C(iplan,d_u+i*tupe,d_u+i*tupe,HIPFFT_BACKWARD) != HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR:hipfftExecC2C failed!",__FUNCTION__,__LINE__); return; } } if(hipDeviceSynchronize() != hipSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; } //ifft_v if(hipfftPlan1d(&iplan,tupe,HIPFFT_C2C,1) != HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR: hipfftPlan1d failed!",__FUNCTION__,__LINE__); return; } for(int i=0;i<step_v;i++){ if(hipfftExecC2C(iplan,d_v+i*tupe,d_v+i*tupe,HIPFFT_BACKWARD) != HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d]CUFFT ERROR: cufftExecc2Cfailed!",__FUNCTION__,__LINE__); return; } } if(hipDeviceSynchronize() != hipSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; } //ifft_s if(hipfftPlan1d(&iplan,tupe,HIPFFT_C2C,1) != HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR: hipfftPlan1d failed!",__FUNCTION__,__LINE__); return; } for(int i=0;i<step_s;i++){ if(hipfftExecC2C(iplan,d_s2+i*tupe,d_s2+i*tupe,HIPFFT_BACKWARD) != HIPFFT_SUCCESS){ 
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: hipfftExecC2C failed!",__FUNCTION__,__LINE__); return; } } //transform hipMemcpy(h_u,d_u,sizeof(hipComplex)*tupe*step_u,hipMemcpyDeviceToHost); hipMemcpy(h_v,d_v,sizeof(hipComplex)*tupe*step_v,hipMemcpyDeviceToHost); hipMemcpy(h_s,d_s2,sizeof(hipComplex)*tupe*step_s,hipMemcpyDeviceToHost); //transform_u for(int i=0;i<step_u;i++){ for(int j=0;j<tupe;j++){ U[j*step_u+i]=h_u[i*tupe+j].x/tupe; // U[j*step_u+i].y=h_u[i*tupe+j].y/tupe; } } //transform_v for(int i=0;i<step_v;i++){ for(int j=0;j<tupe;j++){ V[j*step_v+i]=h_v[i*tupe+j].x/tupe; // V[j*step_v+i].y=h_v[i*tupe+j].y/tupe; } } //transform_s for(int i=0;i<step_s;i++){ for(int j=0;j<tupe;j++){ S[j*step_s+i]=h_s[i*tupe+j].x/tupe; // S[j*step_s+j].y=h_s[i*tupe+j].y/tupe; } } if(hipfftDestroy(iplan)!=HIPFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] hipfftDestroy failed!",__FUNCTION__,__LINE__); return; } if(d_u != NULL){ hipFree(d_u); d_u =NULL; } if(d_v != NULL){ hipFree(d_v); d_v = NULL; } if(d_s2 != NULL){ hipFree(d_s2); d_s2 = NULL; } if(h_u !=NULL){ free(h_u); h_u = NULL; } if(h_v != NULL){ free(h_v); h_v = NULL; } if(h_s != NULL){ free(h_s); h_s = NULL; } }
9cf41573242dbd59a4eaf02197e78d3fea80f146.cu
#include "svd.h" #include "based.h" void basedtsvd(float* t,const int m,const int n,const int tupe,float* U,float* S,float* V){ int bat = m*n; cufftComplex* t_f = (cufftComplex*)malloc(bat*tupe*sizeof(cufftComplex)); //transform for(int i=0;i<bat;i++){ for(int j=0;j<tupe;j++){ t_f[i*tupe+j].x=t[j*bat+i]; t_f[i*tupe+j].y=0; } } //tfft:C2C cufftComplex* d_fftData; cudaMalloc((void**)&d_fftData,tupe*bat*sizeof(cufftComplex)); cudaMemcpy(d_fftData,t_f,bat*tupe*sizeof(cufftComplex),cudaMemcpyHostToDevice); cufftHandle plan; if(cufftPlan1d(&plan,tupe,CUFFT_C2C,1) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] cufftPlan1d failed!",__FUNCTION__,__LINE__); return; } if(cudaDeviceSynchronize() != cudaSuccess){ fprintf(stdout,"[%s]:[%d] cuda syncthronize err!",__FUNCTION__,__LINE__); return; } for(int i=0;i<bat;i++){ if(cufftExecC2C(plan,d_fftData+i*tupe,d_fftData+i*tupe,CUFFT_FORWARD) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] cufftExecC2C failed!",__FUNCTION__,__LINE__); return; } } //transform cudaMemcpy(t_f,d_fftData,sizeof(cufftComplex)*bat*tupe,cudaMemcpyDeviceToHost); cufftComplex* t_f2 = (cufftComplex*)malloc(sizeof(cufftComplex)*tupe*bat); for(int i=0;i<bat;i++){ for(int j=0;j<tupe;j++){ t_f2[j*bat+i]=t_f[i*tupe+j]; } } /*printf("\n============================\n"); for(int i=0;i<bat*tupe;i++){ printf("[%f %f]",t_f2[i].x,t_f2[i].y); } printf("\n============================\n"); */ cudaMemcpy(d_fftData,t_f2,sizeof(cufftComplex)*bat*tupe,cudaMemcpyHostToDevice); if(cufftDestroy(plan)!=CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] cufftDestroy failed!",__FUNCTION__,__LINE__); return; } if(t_f != NULL){ free(t_f); t_f = NULL; } if(t_f2 !=NULL){ free(t_f2); t_f2 = NULL; } //tsvd cusolverDnHandle_t handle; gesvdjInfo_t params; int* info = NULL; int echo = 1; int lda = m; int ldu = m; int ldv = n; int lwork = 0; cuComplex* work=NULL; //malloc u s v float* d_s = NULL; cuComplex* d_u = NULL; cuComplex* d_v = NULL; cudaMalloc((void**)&d_s,sizeof(float)*tupe*((m<n)?m:n)); cudaMalloc((void**)&d_u,sizeof(cuComplex)*tupe*m*((m<n)?m:n)); cudaMalloc((void**)&d_v,sizeof(cuComplex)*tupe*n*((m<n)?m:n)); cudaMalloc((void**)&info,sizeof(int)); if(cusolverDnCreate(&handle) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] cusolverDnCreate failed!",__FUNCTION__,__LINE__); return; } if(cusolverDnCreateGesvdjInfo(&params) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:creation svd info srror",__FUNCTION__,__LINE__); return; } if(cusolverDnCgesvdj_bufferSize( handle, CUSOLVER_EIG_MODE_VECTOR, echo, m, n, d_fftData, m, d_s, d_u, ldu, d_v, ldv, &lwork, params) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR: create buffersize failed!",__FUNCTION__,__LINE__); return; } if(cudaDeviceSynchronize() != cudaSuccess){ fprintf(stdout,"[%s]:[%d] cuda syncthronize err!",__FUNCTION__,__LINE__); return; } cudaMalloc((void**)&work,sizeof(cuComplex)*lwork); int step_d = m*n; int step_u = m*((m<n)?m:n); int step_s = ((m<n)?m:n); int step_v = n*((m<n)?m:n); for(int i=0;i<tupe;i++){ if(cusolverDnCgesvdj( handle, CUSOLVER_EIG_MODE_VECTOR, echo, m, n, d_fftData+step_d*i, lda, d_s+i*step_s, d_u+i*step_u, ldu, d_v+i*step_v, ldv, work, lwork, info, params) != CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUSOLVER ERROR:cusolverDnCgesvdj failed!",__FUNCTION__,__LINE__); return; } } if(cudaDeviceSynchronize() != cudaSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; } if(cusolverDnDestroy(handle)!=CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] 
cusolverDnDestroy failed!",__FUNCTION__,__LINE__); return; } if(cusolverDnDestroyGesvdjInfo(params)!=CUSOLVER_STATUS_SUCCESS){ fprintf(stdout,"[%s]:[%d] cusolverDnDestroy failed!",__FUNCTION__,__LINE__); return; } if(d_fftData != NULL){ cudaFree(d_fftData); d_fftData = NULL; } if(work != NULL){ cudaFree(work); work = NULL; } if(info != NULL){ cudaFree(info); info = NULL; } //ifft //transform cuComplex* h_u = (cuComplex*)malloc(sizeof(cuComplex)*tupe*step_u); cuComplex* h_u2 = (cuComplex*)malloc(sizeof(cuComplex)*tupe*step_u); cuComplex* h_v = (cuComplex*)malloc(sizeof(cuComplex)*tupe*step_v); cuComplex* h_v2 = (cuComplex*)malloc(sizeof(cuComplex)*tupe*step_v); cuComplex* h_s = (cuComplex*)malloc(sizeof(cuComplex)*tupe*step_s); float* h_s2 = (float*)malloc(sizeof(float)*tupe*step_s); cuComplex* d_s2; cudaMalloc((void**)&d_s2,sizeof(cuComplex)*tupe*step_s); cudaMemcpy(h_u2,d_u,sizeof(cuComplex)*tupe*step_u,cudaMemcpyDeviceToHost); cudaMemcpy(h_v2,d_v,sizeof(cuComplex)*tupe*step_v,cudaMemcpyDeviceToHost); cudaMemcpy(h_s2,d_s,sizeof(float)*tupe*step_s,cudaMemcpyDeviceToHost); /*printf("\n============================\n"); for(int i=0;i<tupe*step_s;i++){ printf("[%f ]",h_s2[i]); } printf("\n============================\n"); */ //transform_u for(int i=0;i<step_u;i++){ for(int j=0;j<tupe;j++){ h_u[i*tupe+j]=h_u2[j*step_u+i]; } } //transform_v for(int i=0;i<step_v;i++){ for(int j=0;j<tupe;j++){ h_v[i*tupe+j]=h_v2[j*step_v+i]; } } //transform_s for(int i=0;i<step_s;i++){ for(int j=0;j<tupe;j++){ h_s[i*tupe+j].x=h_s2[j*step_s+i]; h_s[i*tupe+j].y=0; } } /* for(int i=0;i<tupe*step_s;i++){ printf("%f ",h_s2[i]); } printf("\n"); */ cudaMemcpy(d_u,h_u,sizeof(cuComplex)*tupe*step_u,cudaMemcpyHostToDevice); cudaMemcpy(d_s2,h_s,sizeof(cuComplex)*tupe*step_s,cudaMemcpyHostToDevice); cudaMemcpy(d_v,h_v,sizeof(cuComplex)*tupe*step_v,cudaMemcpyHostToDevice); if(h_u2 != NULL){ free(h_u2); h_u2 = NULL; } if(h_v2 != NULL){ free(h_v2); h_v2 = NULL; } if(h_s2 != NULL){ free(h_s2); h_s2= NULL; } if(d_s != NULL){ cudaFree(d_s); d_s = NULL; } cufftHandle iplan; if(cufftPlan1d(&iplan,tupe,CUFFT_C2C,1) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR: cufftPlan1d failed!",__FUNCTION__,__LINE__); return; } if(cudaDeviceSynchronize() != cudaSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; } //ifft_u for(int i=0;i<step_u;i++){ if(cufftExecC2C(iplan,d_u+i*tupe,d_u+i*tupe,CUFFT_INVERSE) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR:cufftExecC2C failed!",__FUNCTION__,__LINE__); return; } } if(cudaDeviceSynchronize() != cudaSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; } //ifft_v if(cufftPlan1d(&iplan,tupe,CUFFT_C2C,1) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR: cufftPlan1d failed!",__FUNCTION__,__LINE__); return; } for(int i=0;i<step_v;i++){ if(cufftExecC2C(iplan,d_v+i*tupe,d_v+i*tupe,CUFFT_INVERSE) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d]CUFFT ERROR: cufftExecc2Cfailed!",__FUNCTION__,__LINE__); return; } } if(cudaDeviceSynchronize() != cudaSuccess){ fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__); return; } //ifft_s if(cufftPlan1d(&iplan,tupe,CUFFT_C2C,1) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR: cufftPlan1d failed!",__FUNCTION__,__LINE__); return; } for(int i=0;i<step_s;i++){ if(cufftExecC2C(iplan,d_s2+i*tupe,d_s2+i*tupe,CUFFT_INVERSE) != CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] CUFFT ERROR: cufftExecC2C failed!",__FUNCTION__,__LINE__); return; } } //transform 
cudaMemcpy(h_u,d_u,sizeof(cuComplex)*tupe*step_u,cudaMemcpyDeviceToHost); cudaMemcpy(h_v,d_v,sizeof(cuComplex)*tupe*step_v,cudaMemcpyDeviceToHost); cudaMemcpy(h_s,d_s2,sizeof(cuComplex)*tupe*step_s,cudaMemcpyDeviceToHost); //transform_u for(int i=0;i<step_u;i++){ for(int j=0;j<tupe;j++){ U[j*step_u+i]=h_u[i*tupe+j].x/tupe; // U[j*step_u+i].y=h_u[i*tupe+j].y/tupe; } } //transform_v for(int i=0;i<step_v;i++){ for(int j=0;j<tupe;j++){ V[j*step_v+i]=h_v[i*tupe+j].x/tupe; // V[j*step_v+i].y=h_v[i*tupe+j].y/tupe; } } //transform_s for(int i=0;i<step_s;i++){ for(int j=0;j<tupe;j++){ S[j*step_s+i]=h_s[i*tupe+j].x/tupe; // S[j*step_s+j].y=h_s[i*tupe+j].y/tupe; } } if(cufftDestroy(iplan)!=CUFFT_SUCCESS){ fprintf(stdout,"[%s]:[%d] cufftDestroy failed!",__FUNCTION__,__LINE__); return; } if(d_u != NULL){ cudaFree(d_u); d_u =NULL; } if(d_v != NULL){ cudaFree(d_v); d_v = NULL; } if(d_s2 != NULL){ cudaFree(d_s2); d_s2 = NULL; } if(h_u !=NULL){ free(h_u); h_u = NULL; } if(h_v != NULL){ free(h_v); h_v = NULL; } if(h_s != NULL){ free(h_s); h_s = NULL; } }
5d245356054c1a351d07547b21ff415195a65f7c.hip
// !!! This is a file automatically generated by hipify!!! //raytracer.mustafaisik.net// #include "world.cuh" #include "camera.cuh" #include "instance.cuh" #include "material.cuh" #include "mesh.cuh" #include "intensity_light.cuh" #include "rectangle_light.cuh" #include "renderer_hip.cuh" #include "scene.cuh" #include "sphere_hip.cuh" #include "triangle.cuh" #include "system.cuh" #include "timer.cuh" #include "texture_manager.cuh" #include <fstream> #include <sstream> #include <vector> #include <GLFW/glfw3.h> #include <glm/glm.hpp> #include <glm/gtx/transform.hpp> #include <assimp/Importer.hpp> #include <assimp/scene.h> #include <assimp/postprocess.h> #include "gl/tinyxml2.h" using namespace tinyxml2; __constant__ float gPerlinGradients[36] = { 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f }; __constant__ int gPerlinPermutation[512] = { 151,160,137,91,90,15, 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180,151,160,137,91,90,15, 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 }; World::World() : m_system(nullptr) , m_renderer(nullptr) , m_camera(nullptr) , m_scene(nullptr) , m_camera_speed(1) {} World::~World() {} void World::loadScene(const std::string& filepath) { XMLDocument file; std::stringstream stream; auto res = file.LoadFile(filepath.c_str()); if (res) { throw std::runtime_error("Error: Cannot load the xml file."); } XMLNode* root = file.FirstChild(); if (!root) { throw std::runtime_error("Error: Root is not found."); } //Camera speed auto element = root->FirstChildElement("CameraSpeed"); if (element) { stream << element->GetText() << std::endl; stream >> m_camera_speed; stream.clear(); } else { 
m_camera_speed = 1.0; } //Image name element = root->FirstChildElement("ImageName"); if (element) { stream << element->GetText() << std::endl; stream >> m_image_name; stream.clear(); } else { throw std::runtime_error("Error: Image name is not specified."); } //Image resolution element = root->FirstChildElement("ImageResolution"); if (element) { stream << element->GetText() << std::endl; stream >> m_screen_width >> m_screen_height; stream.clear(); } else { throw std::runtime_error("Error: Image resolution is not specified."); } //Camera glm::vec3 position, gaze, up; glm::vec4 near_plane; float near_distance; float focus_distance; float aperture_radius; element = root->FirstChildElement("Camera"); if (!element) { throw std::runtime_error("Error: Camera is not specified."); } //Camera-position auto child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera Position is not specified."); } //Camera-gaze child = element->FirstChildElement("Gaze"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera Gaze is not specified."); } //Camera-up child = element->FirstChildElement("Up"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera Up is not specified."); } //Camera-near plane child = element->FirstChildElement("NearPlane"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera NearPlane is not specified."); } //Camera-near distance child = element->FirstChildElement("NearDistance"); if (child) { stream << child->GetText() << std::endl; } else { stream << "1" << std::endl; } //Camera-focus distance child = element->FirstChildElement("FocusDistance"); if (child) { stream << child->GetText() << std::endl; } else { stream << "1" << std::endl; } //Camera-aperture radius child = element->FirstChildElement("ApertureRadius"); if (child) { stream << child->GetText() << std::endl; } else { stream << "0" << std::endl; } stream >> position.x >> position.y >> position.z; stream >> gaze.x >> gaze.y >> gaze.z; stream >> up.x >> up.y >> up.z; stream >> near_plane.x >> near_plane.y >> near_plane.z >> near_plane.w; stream >> near_distance; stream >> focus_distance; stream >> aperture_radius; stream.clear(); //Initialize the modules. 
m_renderer.reset(new Renderer(m_screen_width, m_screen_height)); m_camera.reset(new Camera(position, glm::normalize(gaze) * near_distance, up, near_plane, glm::ivec2(m_screen_width, m_screen_height), aperture_radius, focus_distance)); m_scene.reset(new Scene()); //Some constants element = root->FirstChildElement("BackgroundColor"); if (element) { stream << element->GetText() << std::endl; stream >> m_scene->background_color.x >> m_scene->background_color.y >> m_scene->background_color.z; stream.clear(); } element = root->FirstChildElement("ShadowRayEpsilon"); if (element) { stream << element->GetText() << std::endl; stream >> m_scene->shadow_ray_epsilon; stream.clear(); } //Lights std::vector<IntensityLight> intensity_lights; std::vector<RectangleLight> rectangle_lights; glm::vec3 intensity; glm::vec3 edge_vector1, edge_vector2; float falloff_angle, cutoff_angle; if (element = root->FirstChildElement("Lights")) { //Point lights element = element->FirstChildElement("PointLight"); while (element) { child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: PointLight Position is not specified."); } child = element->FirstChildElement("Intensity"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: PointLight Intensity is not specified."); } stream >> position.x >> position.y >> position.z; stream >> intensity.x >> intensity.y >> intensity.z; intensity_lights.push_back(IntensityLight(position, intensity)); element = element->NextSiblingElement("PointLight"); stream.clear(); } //Spot lights element = root->FirstChildElement("Lights"); element = element->FirstChildElement("SpotLight"); while (element) { child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Position is not specified."); } child = element->FirstChildElement("Direction"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Direction is not specified."); } child = element->FirstChildElement("Intensity"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Intensity is not specified."); } child = element->FirstChildElement("FalloffAngle"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight FalloffAngle is not specified."); } child = element->FirstChildElement("CutoffAngle"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight CutoffAngle is not specified."); } stream >> position.x >> position.y >> position.z; stream >> gaze.x >> gaze.y >> gaze.z; stream >> intensity.x >> intensity.y >> intensity.z; stream >> falloff_angle >> cutoff_angle; intensity_lights.push_back(IntensityLight(position, intensity, gaze, falloff_angle, cutoff_angle)); element = element->NextSiblingElement("SpotLight"); stream.clear(); } if (intensity_lights.size()) { m_scene->addIntensityLights(intensity_lights); } //Rectangle lights element = root->FirstChildElement("Lights"); element = element->FirstChildElement("RectangleLight"); while (element) { child = element->FirstChildElement("Radiance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Radiance is not specified."); } child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << 
std::endl; } else { throw std::runtime_error("Error: SpotLight Position is not specified."); } child = element->FirstChildElement("Edge1"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Edge1 is not specified."); } child = element->FirstChildElement("Edge2"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Edge2 is not specified."); } stream >> intensity.x >> intensity.y >> intensity.z; stream >> position.x >> position.y >> position.z; stream >> edge_vector1.x >> edge_vector1.y >> edge_vector1.z; stream >> edge_vector2.x >> edge_vector2.y >> edge_vector2.z; rectangle_lights.push_back(RectangleLight(intensity, position, edge_vector1, edge_vector2)); element = element->NextSiblingElement("RectangleLight"); stream.clear(); } if (rectangle_lights.size()) { m_scene->addRectangleLights(rectangle_lights); } } //Textures std::string image_path, filter_mode_str, address_mode_str, perlin_mode_str; Texture::FilterMode filter_mode; Texture::AddressMode address_mode; Texture::PerlinMode perlin_mode; float perlin_scale; element = root->FirstChildElement("Textures"); if (element) { element = element->FirstChildElement("Texture"); while (element) { if (element->Attribute("type")) { if (std::string(element->Attribute("type")) == "Image") { child = element->FirstChildElement("ImagePath"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Texture ImagePath is not specified."); } child = element->FirstChildElement("FilterMode"); if (child) { stream << child->GetText() << std::endl; } else { stream << "nearest" << std::endl; } child = element->FirstChildElement("AddressMode"); if (child) { stream << child->GetText() << std::endl; } else { stream << "repeat" << std::endl; } stream >> image_path >> filter_mode_str >> address_mode_str; //FilterMode if (filter_mode_str == "nearest") { filter_mode = Texture::NEAREST; } else if (filter_mode_str == "bilinear") { filter_mode = Texture::BILINEAR; } else { throw std::runtime_error("Error: Wrong FilterMode parameter."); } //AddressMode if (address_mode_str == "repeat") { address_mode = Texture::REPEAT; } else if (address_mode_str == "clamp") { address_mode = Texture::CLAMP; } else { throw std::runtime_error("Error: Wrong AddressMode parameter."); } TextureManager::Manager().loadImageTexture(image_path.c_str(), Texture::SampleParams(filter_mode, address_mode)); } else if (std::string(element->Attribute("type")) == "Perlin") { child = element->FirstChildElement("PerlinMode"); if (child) { stream << child->GetText() << std::endl; } else { stream << "patch" << std::endl; } child = element->FirstChildElement("ScalingFactor"); if (child) { stream << child->GetText() << std::endl; } else { stream << "1" << std::endl; } stream >> perlin_mode_str >> perlin_scale; //PerlinMode if (perlin_mode_str == "patch") { perlin_mode = Texture::PATCH; } else if (perlin_mode_str == "vein") { perlin_mode = Texture::VEIN; } else { throw std::runtime_error("Error: Wrong PerlinMode parameter."); } TextureManager::Manager().loadPerlinTexture(Texture::SampleParams(perlin_mode, perlin_scale)); } else { throw std::runtime_error("Error: Wrong texture type."); } } else { throw std::runtime_error("Error: Texture type is not specified."); } element = element->NextSiblingElement("Texture"); stream.clear(); } if (TextureManager::Manager().get_textures().size()) { m_scene->addTextures(TextureManager::Manager().get_textures()); } } //Materials 
element = root->FirstChildElement("Materials"); std::vector<Material> materials; glm::vec3 dont_care3f(0.0f, 0.0f, 0.0f); float dont_care1f = 0.0f; glm::vec3 emission, diffuse, specular, mirror, tint; int texture_id; float tint_distance, ior, scattering_coefficient, anisotropy; element = element->FirstChildElement("Material"); while (element) { if (element->Attribute("type")) { if (std::string(element->Attribute("type")) == "Emissive") { child = element->FirstChildElement("Emission"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Emission is not specified."); } stream >> emission.x >> emission.y >> emission.z; materials.push_back(Material(emission, dont_care3f, dont_care3f, dont_care3f, dont_care3f, dont_care1f, dont_care1f, dont_care1f, nullptr, dont_care1f, Material::EMISSIVE)); } else if (std::string(element->Attribute("type")) == "Lambertian") { child = element->FirstChildElement("DiffuseReflectance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material DiffuseReflectance is not specified."); } child = element->FirstChildElement("DiffuseTexture"); if (child) { stream << child->GetText() << std::endl; } else { stream << "-1" << std::endl; } stream >> diffuse.x >> diffuse.y >> diffuse.z; stream >> texture_id; const Texture* diffuse_texture_ptr = nullptr; if (texture_id >= 0) { diffuse_texture_ptr = m_scene->textures.first + texture_id; } materials.push_back(Material(dont_care3f, diffuse, dont_care3f, dont_care3f, dont_care3f, dont_care1f, dont_care1f, dont_care1f, diffuse_texture_ptr, dont_care1f, Material::LAMBERTIAN)); } else if (std::string(element->Attribute("type")) == "PerfectSpecular") { child = element->FirstChildElement("SpecularReflectance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material SpecularReflectance is not specified."); } stream >> mirror.x >> mirror.y >> mirror.z; materials.push_back(Material(dont_care3f, dont_care3f, dont_care3f, mirror, dont_care3f, dont_care1f, dont_care1f, dont_care1f, nullptr, dont_care1f, Material::PERFECT_SPECULAR)); } else if (std::string(element->Attribute("type")) == "PerfectRefractive") { child = element->FirstChildElement("Tint"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Tint is not specified."); } child = element->FirstChildElement("TintDistance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material TintDistance is not specified."); } child = element->FirstChildElement("IOR"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material IOR is not specified."); } stream >> tint.x >> tint.y >> tint.z; stream >> tint_distance; stream >> ior; materials.push_back(Material(dont_care3f, dont_care3f, dont_care3f, dont_care3f, tint, tint_distance, dont_care1f, dont_care1f, nullptr, ior, Material::PERFECT_REFRACTIVE)); } else if (std::string(element->Attribute("type")) == "Translucent") { child = element->FirstChildElement("Tint"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Tint is not specified."); } child = element->FirstChildElement("TintDistance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material TintDistance is not specified."); } child = element->FirstChildElement("IOR"); if (child) { stream << 
child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material IOR is not specified."); } child = element->FirstChildElement("ScatteringCoefficient"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material ScatteringCoefficient is not specified."); } child = element->FirstChildElement("Anisotropy"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Anisotropy is not specified."); } stream >> tint.x >> tint.y >> tint.z; stream >> tint_distance; stream >> ior; stream >> scattering_coefficient; stream >> anisotropy; materials.push_back(Material(dont_care3f, dont_care3f, dont_care3f, dont_care3f, tint, tint_distance, scattering_coefficient, anisotropy, nullptr, ior, Material::TRANSLUCENT)); } else { throw std::runtime_error("Error: Wrong material type."); } } else { throw std::runtime_error("Error: Material type is not specified."); } element = element->NextSiblingElement("Material"); stream.clear(); } if (materials.size()) { m_scene->addMaterials(materials); } //Objects-Spheres std::vector<Sphere> spheres; unsigned int material_id; element = root->FirstChildElement("Objects"); if (!element) { throw std::runtime_error("Error: There is no object to render."); } element = element->FirstChildElement("Sphere"); while (element) { glm::mat4 sphere_transformation; child = element->FirstChildElement("Transformation"); if (child) { glm::vec3 scaling, rotation, translation; float angle; auto child2 = child->FirstChildElement("Scaling"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 1 1" << std::endl; } child2 = child->FirstChildElement("Rotation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 0 0 0" << std::endl; } child2 = child->FirstChildElement("Translation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "0 0 0" << std::endl; } stream >> scaling.x >> scaling.y >> scaling.z; stream >> rotation.x >> rotation.y >> rotation.z >> angle; stream >> translation.x >> translation.y >> translation.z; stream.clear(); sphere_transformation = glm::scale(scaling) * sphere_transformation; sphere_transformation = glm::rotate(glm::radians(angle), rotation) * sphere_transformation; sphere_transformation = glm::translate(translation) * sphere_transformation; } child = element->FirstChildElement("Material"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Sphere Material is not specified."); } stream >> material_id; spheres.push_back(Sphere(Instance(sphere_transformation), material_id)); element = element->NextSiblingElement("Sphere"); stream.clear(); } if (spheres.size()) { m_scene->addSpheres(spheres); } //Objects-Mesh std::map<int, Mesh> meshes; element = root->FirstChildElement("Objects"); element = element->FirstChildElement("Mesh"); while (element) { int mesh_id; if (element->Attribute("id")) { mesh_id = std::stoi(element->Attribute("id")); } else { throw std::runtime_error("Error: Mesh id is not specified."); } //Transformation glm::mat4 mesh_transformation; child = element->FirstChildElement("Transformation"); if (child) { glm::vec3 scaling, rotation, translation; float angle; auto child2 = child->FirstChildElement("Scaling"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 1 1" << std::endl; } child2 = child->FirstChildElement("Rotation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 0 0 
0" << std::endl; } child2 = child->FirstChildElement("Translation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "0 0 0" << std::endl; } stream >> scaling.x >> scaling.y >> scaling.z; stream >> rotation.x >> rotation.y >> rotation.z >> angle; stream >> translation.x >> translation.y >> translation.z; stream.clear(); mesh_transformation = glm::scale(scaling) * mesh_transformation; mesh_transformation = glm::rotate(glm::radians(angle), rotation) * mesh_transformation; mesh_transformation = glm::translate(translation) * mesh_transformation; } //Material child = element->FirstChildElement("Material"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Mesh Material is not specified."); } stream >> material_id; int instance_of = -1; auto element_instance_of = root->FirstChildElement("Objects")->FirstChildElement("Mesh"); if (element->Attribute("instanceOf")) { instance_of = std::stoi(element->Attribute("instanceOf")); while (std::stoi(element_instance_of->Attribute("id")) != instance_of) { element_instance_of = element_instance_of->NextSiblingElement("Mesh"); } } //Build the mesh. std::vector<Triangle> mesh_triangles; BBox mesh_bbox; if (instance_of >= 0) { child = element_instance_of->FirstChildElement("Data"); } else { child = element->FirstChildElement("Data"); } //Use assimp to load the model. if (child) { Assimp::Importer importer; const aiScene* scene = importer.ReadFile(child->GetText(), aiProcess_Triangulate | aiProcess_JoinIdenticalVertices | aiProcess_GenSmoothNormals); if (!scene) { throw std::runtime_error("Error: Assimp cannot load the model."); } if (scene->mNumMeshes > 1) { throw std::runtime_error("Error: More than one mesh to handle."); } aiMesh* mesh = scene->mMeshes[0]; int face_count = mesh->mNumFaces; glm::vec3 face_vertices[3]; glm::vec3 face_normals[3]; glm::vec2 face_uvs[3]; for (int i = 0; i < face_count; ++i) { const aiFace& face = mesh->mFaces[i]; for (int k = 0; k < 3; ++k) { const aiVector3D& position = mesh->mVertices[face.mIndices[k]]; const aiVector3D& normal = mesh->mNormals[face.mIndices[k]]; aiVector3D uv = mesh->HasTextureCoords(0) ? 
mesh->mTextureCoords[0][face.mIndices[k]] : aiVector3D(0.0f, 0.0f, 0.0f); face_vertices[k] = glm::vec3(position.x, position.y, position.z); face_normals[k] = glm::vec3(normal.x, normal.y, normal.z); face_uvs[k] = glm::vec2(uv.x, uv.y); } mesh_bbox.extend(mesh_transformation * glm::vec4(face_vertices[0], 1.0f)); mesh_bbox.extend(mesh_transformation * glm::vec4(face_vertices[1], 1.0f)); mesh_bbox.extend(mesh_transformation * glm::vec4(face_vertices[2], 1.0f)); mesh_triangles.push_back(Triangle(face_vertices[0], face_vertices[1] - face_vertices[0], face_vertices[2] - face_vertices[0], face_normals[0], face_normals[1], face_normals[2], face_uvs[0], face_uvs[1], face_uvs[2])); } } else { throw std::runtime_error("Error: Mesh Data is not specified."); } Mesh mesh(Instance(mesh_transformation), mesh_bbox, material_id); if (instance_of >= 0) { mesh.createInstanceMesh(meshes[instance_of]); } else { mesh.createBaseMesh(mesh_triangles); } meshes[mesh_id] = mesh; element = element->NextSiblingElement("Mesh"); stream.clear(); } if (meshes.size()) { auto meshes_size = meshes.size(); std::vector<Mesh> meshes_vector(meshes_size); for (int i = 0; i < meshes_size; ++i) { meshes_vector[i] = meshes[i]; } m_scene->addMeshes(meshes_vector); } } //Photo mode void World::photo() { auto& image = m_renderer->render(*m_camera, *m_scene, 0); image.save(std::string("C://Users//Mustafa//Desktop//") + m_image_name.c_str()); } //Video mode void World::video() { m_system.reset(new System(m_screen_width, m_screen_height)); Timer fps_timer; Timer timer; fps_timer.start(); timer.start(); int counter = 0; int sample_count = 0; int jitter_dimension = 2; int unbiased_at = 1; bool save_image = false; while (true) { auto& image = m_renderer->render(*m_camera, *m_scene, sample_count); m_system->updateWindow(image.get_image()); ++sample_count; ++counter; if (sample_count >= unbiased_at) { unbiased_at += (jitter_dimension * jitter_dimension); ++jitter_dimension; if (save_image) { image.save(std::string("C://Users//Mustafa//Desktop//") + "_" + std::to_string(sample_count) + "_" + m_image_name.c_str()); save_image = false; } } m_system->setWindowTitle("FPS: " + std::to_string(counter / fps_timer.getTime()) + " -_- Sample Count: " + std::to_string(sample_count) + " -_- Unbiased At: " + std::to_string(unbiased_at)); //CAMERA INPUTS// // // // auto delta_time = timer.getTime(); timer.start(); //Poll events before handling. glfwPollEvents(); //Handle keyboard inputs. if (m_system->queryKey(GLFW_KEY_ESCAPE, GLFW_PRESS)) { break; } if (m_system->queryKey(GLFW_KEY_ENTER, GLFW_PRESS)) { save_image = true; } auto right_disp = 0.0f; auto forward_disp = 0.0f; if (m_system->queryKey(GLFW_KEY_W, GLFW_PRESS)) { forward_disp += m_camera_speed * delta_time; } if (m_system->queryKey(GLFW_KEY_S, GLFW_PRESS)) { forward_disp -= m_camera_speed * delta_time; } if (m_system->queryKey(GLFW_KEY_D, GLFW_PRESS)) { right_disp += m_camera_speed * delta_time; } if (m_system->queryKey(GLFW_KEY_A, GLFW_PRESS)) { right_disp -= m_camera_speed * delta_time; } //Handle mouse inputs. auto current_x = 0.0; auto current_y = 0.0; m_system->getCursorPosition(current_x, current_y); static auto last_x = current_x; static auto last_y = current_y; auto x_offset = last_x - current_x; auto y_offset = last_y - current_y; last_x = current_x; last_y = current_y; x_offset *= 0.002f; y_offset *= 0.002f; //Update m_camera's position and orientation. 
if (right_disp != 0 || forward_disp != 0) { m_camera->move(right_disp, forward_disp); fps_timer.start(); counter = 0; sample_count = 0; jitter_dimension = 2; unbiased_at = 1; } if (x_offset != 0 || y_offset != 0) { m_camera->rotate(x_offset, y_offset); fps_timer.start(); counter = 0; sample_count = 0; jitter_dimension = 2; unbiased_at = 1; } // // // //CAMERA INPUTS// } }
5d245356054c1a351d07547b21ff415195a65f7c.cu
//raytracer.mustafaisik.net// #include "world.cuh" #include "camera.cuh" #include "instance.cuh" #include "material.cuh" #include "mesh.cuh" #include "intensity_light.cuh" #include "rectangle_light.cuh" #include "renderer.cuh" #include "scene.cuh" #include "sphere.cuh" #include "triangle.cuh" #include "system.cuh" #include "timer.cuh" #include "texture_manager.cuh" #include <fstream> #include <sstream> #include <vector> #include <GLFW/glfw3.h> #include <glm/glm.hpp> #include <glm/gtx/transform.hpp> #include <assimp/Importer.hpp> #include <assimp/scene.h> #include <assimp/postprocess.h> #include "gl/tinyxml2.h" using namespace tinyxml2; __constant__ float gPerlinGradients[36] = { 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f }; __constant__ int gPerlinPermutation[512] = { 151,160,137,91,90,15, 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180,151,160,137,91,90,15, 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 }; World::World() : m_system(nullptr) , m_renderer(nullptr) , m_camera(nullptr) , m_scene(nullptr) , m_camera_speed(1) {} World::~World() {} void World::loadScene(const std::string& filepath) { XMLDocument file; std::stringstream stream; auto res = file.LoadFile(filepath.c_str()); if (res) { throw std::runtime_error("Error: Cannot load the xml file."); } XMLNode* root = file.FirstChild(); if (!root) { throw std::runtime_error("Error: Root is not found."); } //Camera speed auto element = root->FirstChildElement("CameraSpeed"); if (element) { stream << element->GetText() << std::endl; stream >> m_camera_speed; stream.clear(); } else { m_camera_speed = 1.0; } //Image name element = 
root->FirstChildElement("ImageName"); if (element) { stream << element->GetText() << std::endl; stream >> m_image_name; stream.clear(); } else { throw std::runtime_error("Error: Image name is not specified."); } //Image resolution element = root->FirstChildElement("ImageResolution"); if (element) { stream << element->GetText() << std::endl; stream >> m_screen_width >> m_screen_height; stream.clear(); } else { throw std::runtime_error("Error: Image resolution is not specified."); } //Camera glm::vec3 position, gaze, up; glm::vec4 near_plane; float near_distance; float focus_distance; float aperture_radius; element = root->FirstChildElement("Camera"); if (!element) { throw std::runtime_error("Error: Camera is not specified."); } //Camera-position auto child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera Position is not specified."); } //Camera-gaze child = element->FirstChildElement("Gaze"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera Gaze is not specified."); } //Camera-up child = element->FirstChildElement("Up"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera Up is not specified."); } //Camera-near plane child = element->FirstChildElement("NearPlane"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Camera NearPlane is not specified."); } //Camera-near distance child = element->FirstChildElement("NearDistance"); if (child) { stream << child->GetText() << std::endl; } else { stream << "1" << std::endl; } //Camera-focus distance child = element->FirstChildElement("FocusDistance"); if (child) { stream << child->GetText() << std::endl; } else { stream << "1" << std::endl; } //Camera-aperture radius child = element->FirstChildElement("ApertureRadius"); if (child) { stream << child->GetText() << std::endl; } else { stream << "0" << std::endl; } stream >> position.x >> position.y >> position.z; stream >> gaze.x >> gaze.y >> gaze.z; stream >> up.x >> up.y >> up.z; stream >> near_plane.x >> near_plane.y >> near_plane.z >> near_plane.w; stream >> near_distance; stream >> focus_distance; stream >> aperture_radius; stream.clear(); //Initialize the modules. 
m_renderer.reset(new Renderer(m_screen_width, m_screen_height)); m_camera.reset(new Camera(position, glm::normalize(gaze) * near_distance, up, near_plane, glm::ivec2(m_screen_width, m_screen_height), aperture_radius, focus_distance)); m_scene.reset(new Scene()); //Some constants element = root->FirstChildElement("BackgroundColor"); if (element) { stream << element->GetText() << std::endl; stream >> m_scene->background_color.x >> m_scene->background_color.y >> m_scene->background_color.z; stream.clear(); } element = root->FirstChildElement("ShadowRayEpsilon"); if (element) { stream << element->GetText() << std::endl; stream >> m_scene->shadow_ray_epsilon; stream.clear(); } //Lights std::vector<IntensityLight> intensity_lights; std::vector<RectangleLight> rectangle_lights; glm::vec3 intensity; glm::vec3 edge_vector1, edge_vector2; float falloff_angle, cutoff_angle; if (element = root->FirstChildElement("Lights")) { //Point lights element = element->FirstChildElement("PointLight"); while (element) { child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: PointLight Position is not specified."); } child = element->FirstChildElement("Intensity"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: PointLight Intensity is not specified."); } stream >> position.x >> position.y >> position.z; stream >> intensity.x >> intensity.y >> intensity.z; intensity_lights.push_back(IntensityLight(position, intensity)); element = element->NextSiblingElement("PointLight"); stream.clear(); } //Spot lights element = root->FirstChildElement("Lights"); element = element->FirstChildElement("SpotLight"); while (element) { child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Position is not specified."); } child = element->FirstChildElement("Direction"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Direction is not specified."); } child = element->FirstChildElement("Intensity"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Intensity is not specified."); } child = element->FirstChildElement("FalloffAngle"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight FalloffAngle is not specified."); } child = element->FirstChildElement("CutoffAngle"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight CutoffAngle is not specified."); } stream >> position.x >> position.y >> position.z; stream >> gaze.x >> gaze.y >> gaze.z; stream >> intensity.x >> intensity.y >> intensity.z; stream >> falloff_angle >> cutoff_angle; intensity_lights.push_back(IntensityLight(position, intensity, gaze, falloff_angle, cutoff_angle)); element = element->NextSiblingElement("SpotLight"); stream.clear(); } if (intensity_lights.size()) { m_scene->addIntensityLights(intensity_lights); } //Rectangle lights element = root->FirstChildElement("Lights"); element = element->FirstChildElement("RectangleLight"); while (element) { child = element->FirstChildElement("Radiance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Radiance is not specified."); } child = element->FirstChildElement("Position"); if (child) { stream << child->GetText() << 
std::endl; } else { throw std::runtime_error("Error: SpotLight Position is not specified."); } child = element->FirstChildElement("Edge1"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Edge1 is not specified."); } child = element->FirstChildElement("Edge2"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: SpotLight Edge2 is not specified."); } stream >> intensity.x >> intensity.y >> intensity.z; stream >> position.x >> position.y >> position.z; stream >> edge_vector1.x >> edge_vector1.y >> edge_vector1.z; stream >> edge_vector2.x >> edge_vector2.y >> edge_vector2.z; rectangle_lights.push_back(RectangleLight(intensity, position, edge_vector1, edge_vector2)); element = element->NextSiblingElement("RectangleLight"); stream.clear(); } if (rectangle_lights.size()) { m_scene->addRectangleLights(rectangle_lights); } } //Textures std::string image_path, filter_mode_str, address_mode_str, perlin_mode_str; Texture::FilterMode filter_mode; Texture::AddressMode address_mode; Texture::PerlinMode perlin_mode; float perlin_scale; element = root->FirstChildElement("Textures"); if (element) { element = element->FirstChildElement("Texture"); while (element) { if (element->Attribute("type")) { if (std::string(element->Attribute("type")) == "Image") { child = element->FirstChildElement("ImagePath"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Texture ImagePath is not specified."); } child = element->FirstChildElement("FilterMode"); if (child) { stream << child->GetText() << std::endl; } else { stream << "nearest" << std::endl; } child = element->FirstChildElement("AddressMode"); if (child) { stream << child->GetText() << std::endl; } else { stream << "repeat" << std::endl; } stream >> image_path >> filter_mode_str >> address_mode_str; //FilterMode if (filter_mode_str == "nearest") { filter_mode = Texture::NEAREST; } else if (filter_mode_str == "bilinear") { filter_mode = Texture::BILINEAR; } else { throw std::runtime_error("Error: Wrong FilterMode parameter."); } //AddressMode if (address_mode_str == "repeat") { address_mode = Texture::REPEAT; } else if (address_mode_str == "clamp") { address_mode = Texture::CLAMP; } else { throw std::runtime_error("Error: Wrong AddressMode parameter."); } TextureManager::Manager().loadImageTexture(image_path.c_str(), Texture::SampleParams(filter_mode, address_mode)); } else if (std::string(element->Attribute("type")) == "Perlin") { child = element->FirstChildElement("PerlinMode"); if (child) { stream << child->GetText() << std::endl; } else { stream << "patch" << std::endl; } child = element->FirstChildElement("ScalingFactor"); if (child) { stream << child->GetText() << std::endl; } else { stream << "1" << std::endl; } stream >> perlin_mode_str >> perlin_scale; //PerlinMode if (perlin_mode_str == "patch") { perlin_mode = Texture::PATCH; } else if (perlin_mode_str == "vein") { perlin_mode = Texture::VEIN; } else { throw std::runtime_error("Error: Wrong PerlinMode parameter."); } TextureManager::Manager().loadPerlinTexture(Texture::SampleParams(perlin_mode, perlin_scale)); } else { throw std::runtime_error("Error: Wrong texture type."); } } else { throw std::runtime_error("Error: Texture type is not specified."); } element = element->NextSiblingElement("Texture"); stream.clear(); } if (TextureManager::Manager().get_textures().size()) { m_scene->addTextures(TextureManager::Manager().get_textures()); } } //Materials 
element = root->FirstChildElement("Materials"); std::vector<Material> materials; glm::vec3 dont_care3f(0.0f, 0.0f, 0.0f); float dont_care1f = 0.0f; glm::vec3 emission, diffuse, specular, mirror, tint; int texture_id; float tint_distance, ior, scattering_coefficient, anisotropy; element = element->FirstChildElement("Material"); while (element) { if (element->Attribute("type")) { if (std::string(element->Attribute("type")) == "Emissive") { child = element->FirstChildElement("Emission"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Emission is not specified."); } stream >> emission.x >> emission.y >> emission.z; materials.push_back(Material(emission, dont_care3f, dont_care3f, dont_care3f, dont_care3f, dont_care1f, dont_care1f, dont_care1f, nullptr, dont_care1f, Material::EMISSIVE)); } else if (std::string(element->Attribute("type")) == "Lambertian") { child = element->FirstChildElement("DiffuseReflectance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material DiffuseReflectance is not specified."); } child = element->FirstChildElement("DiffuseTexture"); if (child) { stream << child->GetText() << std::endl; } else { stream << "-1" << std::endl; } stream >> diffuse.x >> diffuse.y >> diffuse.z; stream >> texture_id; const Texture* diffuse_texture_ptr = nullptr; if (texture_id >= 0) { diffuse_texture_ptr = m_scene->textures.first + texture_id; } materials.push_back(Material(dont_care3f, diffuse, dont_care3f, dont_care3f, dont_care3f, dont_care1f, dont_care1f, dont_care1f, diffuse_texture_ptr, dont_care1f, Material::LAMBERTIAN)); } else if (std::string(element->Attribute("type")) == "PerfectSpecular") { child = element->FirstChildElement("SpecularReflectance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material SpecularReflectance is not specified."); } stream >> mirror.x >> mirror.y >> mirror.z; materials.push_back(Material(dont_care3f, dont_care3f, dont_care3f, mirror, dont_care3f, dont_care1f, dont_care1f, dont_care1f, nullptr, dont_care1f, Material::PERFECT_SPECULAR)); } else if (std::string(element->Attribute("type")) == "PerfectRefractive") { child = element->FirstChildElement("Tint"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Tint is not specified."); } child = element->FirstChildElement("TintDistance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material TintDistance is not specified."); } child = element->FirstChildElement("IOR"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material IOR is not specified."); } stream >> tint.x >> tint.y >> tint.z; stream >> tint_distance; stream >> ior; materials.push_back(Material(dont_care3f, dont_care3f, dont_care3f, dont_care3f, tint, tint_distance, dont_care1f, dont_care1f, nullptr, ior, Material::PERFECT_REFRACTIVE)); } else if (std::string(element->Attribute("type")) == "Translucent") { child = element->FirstChildElement("Tint"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Tint is not specified."); } child = element->FirstChildElement("TintDistance"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material TintDistance is not specified."); } child = element->FirstChildElement("IOR"); if (child) { stream << 
child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material IOR is not specified."); } child = element->FirstChildElement("ScatteringCoefficient"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material ScatteringCoefficient is not specified."); } child = element->FirstChildElement("Anisotropy"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Material Anisotropy is not specified."); } stream >> tint.x >> tint.y >> tint.z; stream >> tint_distance; stream >> ior; stream >> scattering_coefficient; stream >> anisotropy; materials.push_back(Material(dont_care3f, dont_care3f, dont_care3f, dont_care3f, tint, tint_distance, scattering_coefficient, anisotropy, nullptr, ior, Material::TRANSLUCENT)); } else { throw std::runtime_error("Error: Wrong material type."); } } else { throw std::runtime_error("Error: Material type is not specified."); } element = element->NextSiblingElement("Material"); stream.clear(); } if (materials.size()) { m_scene->addMaterials(materials); } //Objects-Spheres std::vector<Sphere> spheres; unsigned int material_id; element = root->FirstChildElement("Objects"); if (!element) { throw std::runtime_error("Error: There is no object to render."); } element = element->FirstChildElement("Sphere"); while (element) { glm::mat4 sphere_transformation; child = element->FirstChildElement("Transformation"); if (child) { glm::vec3 scaling, rotation, translation; float angle; auto child2 = child->FirstChildElement("Scaling"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 1 1" << std::endl; } child2 = child->FirstChildElement("Rotation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 0 0 0" << std::endl; } child2 = child->FirstChildElement("Translation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "0 0 0" << std::endl; } stream >> scaling.x >> scaling.y >> scaling.z; stream >> rotation.x >> rotation.y >> rotation.z >> angle; stream >> translation.x >> translation.y >> translation.z; stream.clear(); sphere_transformation = glm::scale(scaling) * sphere_transformation; sphere_transformation = glm::rotate(glm::radians(angle), rotation) * sphere_transformation; sphere_transformation = glm::translate(translation) * sphere_transformation; } child = element->FirstChildElement("Material"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Sphere Material is not specified."); } stream >> material_id; spheres.push_back(Sphere(Instance(sphere_transformation), material_id)); element = element->NextSiblingElement("Sphere"); stream.clear(); } if (spheres.size()) { m_scene->addSpheres(spheres); } //Objects-Mesh std::map<int, Mesh> meshes; element = root->FirstChildElement("Objects"); element = element->FirstChildElement("Mesh"); while (element) { int mesh_id; if (element->Attribute("id")) { mesh_id = std::stoi(element->Attribute("id")); } else { throw std::runtime_error("Error: Mesh id is not specified."); } //Transformation glm::mat4 mesh_transformation; child = element->FirstChildElement("Transformation"); if (child) { glm::vec3 scaling, rotation, translation; float angle; auto child2 = child->FirstChildElement("Scaling"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 1 1" << std::endl; } child2 = child->FirstChildElement("Rotation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "1 0 0 
0" << std::endl; } child2 = child->FirstChildElement("Translation"); if (child2) { stream << child2->GetText() << std::endl; } else { stream << "0 0 0" << std::endl; } stream >> scaling.x >> scaling.y >> scaling.z; stream >> rotation.x >> rotation.y >> rotation.z >> angle; stream >> translation.x >> translation.y >> translation.z; stream.clear(); mesh_transformation = glm::scale(scaling) * mesh_transformation; mesh_transformation = glm::rotate(glm::radians(angle), rotation) * mesh_transformation; mesh_transformation = glm::translate(translation) * mesh_transformation; } //Material child = element->FirstChildElement("Material"); if (child) { stream << child->GetText() << std::endl; } else { throw std::runtime_error("Error: Mesh Material is not specified."); } stream >> material_id; int instance_of = -1; auto element_instance_of = root->FirstChildElement("Objects")->FirstChildElement("Mesh"); if (element->Attribute("instanceOf")) { instance_of = std::stoi(element->Attribute("instanceOf")); while (std::stoi(element_instance_of->Attribute("id")) != instance_of) { element_instance_of = element_instance_of->NextSiblingElement("Mesh"); } } //Build the mesh. std::vector<Triangle> mesh_triangles; BBox mesh_bbox; if (instance_of >= 0) { child = element_instance_of->FirstChildElement("Data"); } else { child = element->FirstChildElement("Data"); } //Use assimp to load the model. if (child) { Assimp::Importer importer; const aiScene* scene = importer.ReadFile(child->GetText(), aiProcess_Triangulate | aiProcess_JoinIdenticalVertices | aiProcess_GenSmoothNormals); if (!scene) { throw std::runtime_error("Error: Assimp cannot load the model."); } if (scene->mNumMeshes > 1) { throw std::runtime_error("Error: More than one mesh to handle."); } aiMesh* mesh = scene->mMeshes[0]; int face_count = mesh->mNumFaces; glm::vec3 face_vertices[3]; glm::vec3 face_normals[3]; glm::vec2 face_uvs[3]; for (int i = 0; i < face_count; ++i) { const aiFace& face = mesh->mFaces[i]; for (int k = 0; k < 3; ++k) { const aiVector3D& position = mesh->mVertices[face.mIndices[k]]; const aiVector3D& normal = mesh->mNormals[face.mIndices[k]]; aiVector3D uv = mesh->HasTextureCoords(0) ? 
mesh->mTextureCoords[0][face.mIndices[k]] : aiVector3D(0.0f, 0.0f, 0.0f); face_vertices[k] = glm::vec3(position.x, position.y, position.z); face_normals[k] = glm::vec3(normal.x, normal.y, normal.z); face_uvs[k] = glm::vec2(uv.x, uv.y); } mesh_bbox.extend(mesh_transformation * glm::vec4(face_vertices[0], 1.0f)); mesh_bbox.extend(mesh_transformation * glm::vec4(face_vertices[1], 1.0f)); mesh_bbox.extend(mesh_transformation * glm::vec4(face_vertices[2], 1.0f)); mesh_triangles.push_back(Triangle(face_vertices[0], face_vertices[1] - face_vertices[0], face_vertices[2] - face_vertices[0], face_normals[0], face_normals[1], face_normals[2], face_uvs[0], face_uvs[1], face_uvs[2])); } } else { throw std::runtime_error("Error: Mesh Data is not specified."); } Mesh mesh(Instance(mesh_transformation), mesh_bbox, material_id); if (instance_of >= 0) { mesh.createInstanceMesh(meshes[instance_of]); } else { mesh.createBaseMesh(mesh_triangles); } meshes[mesh_id] = mesh; element = element->NextSiblingElement("Mesh"); stream.clear(); } if (meshes.size()) { auto meshes_size = meshes.size(); std::vector<Mesh> meshes_vector(meshes_size); for (int i = 0; i < meshes_size; ++i) { meshes_vector[i] = meshes[i]; } m_scene->addMeshes(meshes_vector); } } //Photo mode void World::photo() { auto& image = m_renderer->render(*m_camera, *m_scene, 0); image.save(std::string("C://Users//Mustafa//Desktop//") + m_image_name.c_str()); } //Video mode void World::video() { m_system.reset(new System(m_screen_width, m_screen_height)); Timer fps_timer; Timer timer; fps_timer.start(); timer.start(); int counter = 0; int sample_count = 0; int jitter_dimension = 2; int unbiased_at = 1; bool save_image = false; while (true) { auto& image = m_renderer->render(*m_camera, *m_scene, sample_count); m_system->updateWindow(image.get_image()); ++sample_count; ++counter; if (sample_count >= unbiased_at) { unbiased_at += (jitter_dimension * jitter_dimension); ++jitter_dimension; if (save_image) { image.save(std::string("C://Users//Mustafa//Desktop//") + "_" + std::to_string(sample_count) + "_" + m_image_name.c_str()); save_image = false; } } m_system->setWindowTitle("FPS: " + std::to_string(counter / fps_timer.getTime()) + " -_- Sample Count: " + std::to_string(sample_count) + " -_- Unbiased At: " + std::to_string(unbiased_at)); //CAMERA INPUTS// // // // auto delta_time = timer.getTime(); timer.start(); //Poll events before handling. glfwPollEvents(); //Handle keyboard inputs. if (m_system->queryKey(GLFW_KEY_ESCAPE, GLFW_PRESS)) { break; } if (m_system->queryKey(GLFW_KEY_ENTER, GLFW_PRESS)) { save_image = true; } auto right_disp = 0.0f; auto forward_disp = 0.0f; if (m_system->queryKey(GLFW_KEY_W, GLFW_PRESS)) { forward_disp += m_camera_speed * delta_time; } if (m_system->queryKey(GLFW_KEY_S, GLFW_PRESS)) { forward_disp -= m_camera_speed * delta_time; } if (m_system->queryKey(GLFW_KEY_D, GLFW_PRESS)) { right_disp += m_camera_speed * delta_time; } if (m_system->queryKey(GLFW_KEY_A, GLFW_PRESS)) { right_disp -= m_camera_speed * delta_time; } //Handle mouse inputs. auto current_x = 0.0; auto current_y = 0.0; m_system->getCursorPosition(current_x, current_y); static auto last_x = current_x; static auto last_y = current_y; auto x_offset = last_x - current_x; auto y_offset = last_y - current_y; last_x = current_x; last_y = current_y; x_offset *= 0.002f; y_offset *= 0.002f; //Update m_camera's position and orientation. 
if (right_disp != 0 || forward_disp != 0) { m_camera->move(right_disp, forward_disp); fps_timer.start(); counter = 0; sample_count = 0; jitter_dimension = 2; unbiased_at = 1; } if (x_offset != 0 || y_offset != 0) { m_camera->rotate(x_offset, y_offset); fps_timer.start(); counter = 0; sample_count = 0; jitter_dimension = 2; unbiased_at = 1; } // // // //CAMERA INPUTS// } }
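A minimal driver sketch for the World class defined in the pair above; it is not part of the original files, and the default-constructed World, the placeholder scene path, and the choice of photo() over video() are assumptions made only for illustration.

// Hypothetical usage sketch, not part of the original .hip/.cu pair.
// Assumes it is compiled and linked together with the World implementation above.
#include "world.cuh"
#include <iostream>

int main(int argc, char** argv)
{
    try {
        World world;
        // "scene.xml" is a placeholder path; loadScene() throws on any missing required element.
        world.loadScene(argc > 1 ? argv[1] : "scene.xml");
        world.photo();   // single offline render; world.video() would start the interactive loop instead
    } catch (const std::exception& ex) {
        std::cerr << ex.what() << std::endl;
        return 1;
    }
    return 0;
}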
681d03098cda3169c0bcacf2d102c153568ab5ea.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* modified by saven. on March 07. complete working on shared memory. */

#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_

#define NUM_BANKS 16
#define LOG_NUM_BANKS 4

// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS

#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index) temp[index]
#endif

__device__ void scanPS(int *g_odata, int *g_idata, int n)
{
    extern __shared__ int partial_sum[]; // Dynamically allocated shared memory for scan kernels

    int tid = threadIdx.x;

    int ai = tid;
    int bi = tid + (n>>1);

    // compute spacing to avoid bank conflicts
    int bankOffset1 = (ai >> LOG_NUM_BANKS);
    int bankOffset2 = (bi >> LOG_NUM_BANKS);

    // Cache the computational window in shared memory
    partial_sum[ai + bankOffset1] = g_idata[ai];
    partial_sum[bi + bankOffset2] = g_idata[bi];

    int offset = 1;

    // build the sum in place up the tree
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads();

        if (tid < d)
        {
            int ai = offset*(2*tid+1)-1;
            int bi = offset*(2*tid+2)-1;
            ai += (ai >> LOG_NUM_BANKS);
            bi += (bi >> LOG_NUM_BANKS);

            partial_sum[bi] += partial_sum[ai];
        }

        offset *= 2;
    }

    // scan back down the tree

    // clear the last element
    if (tid == 0)
    {
        int conflict_offset = (n-1) >> LOG_NUM_BANKS;
        partial_sum[n - 1 + conflict_offset] = 0;
    }

    // traverse down the tree building the scan in place
    for (int d = 1; d < n; d *= 2)
    {
        offset >>= 1;
        __syncthreads();

        if (tid < d)
        {
            int ai = offset*(2*tid+1)-1;
            int bi = offset*(2*tid+2)-1;
            ai += (ai >> LOG_NUM_BANKS);
            bi += (bi >> LOG_NUM_BANKS);

            int t = partial_sum[ai];
            partial_sum[ai] = partial_sum[bi];
            partial_sum[bi] += t;
        }
    }

    __syncthreads();

    // write results to global memory
    g_odata[ai] = partial_sum[ai + bankOffset1];
    g_odata[bi] = partial_sum[bi + bankOffset2];
}

__global__ void computePS(int *g_odata, int *g_idata, int blockSize)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int bid = bx + by*gridDim.x;
    int start = blockSize*bid;

    scanPS((g_odata+start), (g_idata+start), blockSize);
}

#endif // #ifndef _SCAN_BEST_KERNEL_H_
681d03098cda3169c0bcacf2d102c153568ab5ea.cu
/* modified by saven. on March 07. complete working on shared memory. */

#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_

#define NUM_BANKS 16
#define LOG_NUM_BANKS 4

// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS

#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index) temp[index]
#endif

__device__ void scanPS(int *g_odata, int *g_idata, int n)
{
    extern __shared__ int partial_sum[]; // Dynamically allocated shared memory for scan kernels

    int tid = threadIdx.x;

    int ai = tid;
    int bi = tid + (n>>1);

    // compute spacing to avoid bank conflicts
    int bankOffset1 = (ai >> LOG_NUM_BANKS);
    int bankOffset2 = (bi >> LOG_NUM_BANKS);

    // Cache the computational window in shared memory
    partial_sum[ai + bankOffset1] = g_idata[ai];
    partial_sum[bi + bankOffset2] = g_idata[bi];

    int offset = 1;

    // build the sum in place up the tree
    for (int d = n>>1; d > 0; d >>= 1)
    {
        __syncthreads();

        if (tid < d)
        {
            int ai = offset*(2*tid+1)-1;
            int bi = offset*(2*tid+2)-1;
            ai += (ai >> LOG_NUM_BANKS);
            bi += (bi >> LOG_NUM_BANKS);

            partial_sum[bi] += partial_sum[ai];
        }

        offset *= 2;
    }

    // scan back down the tree

    // clear the last element
    if (tid == 0)
    {
        int conflict_offset = (n-1) >> LOG_NUM_BANKS;
        partial_sum[n - 1 + conflict_offset] = 0;
    }

    // traverse down the tree building the scan in place
    for (int d = 1; d < n; d *= 2)
    {
        offset >>= 1;
        __syncthreads();

        if (tid < d)
        {
            int ai = offset*(2*tid+1)-1;
            int bi = offset*(2*tid+2)-1;
            ai += (ai >> LOG_NUM_BANKS);
            bi += (bi >> LOG_NUM_BANKS);

            int t = partial_sum[ai];
            partial_sum[ai] = partial_sum[bi];
            partial_sum[bi] += t;
        }
    }

    __syncthreads();

    // write results to global memory
    g_odata[ai] = partial_sum[ai + bankOffset1];
    g_odata[bi] = partial_sum[bi + bankOffset2];
}

__global__ void computePS(int *g_odata, int *g_idata, int blockSize)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int bid = bx + by*gridDim.x;
    int start = blockSize*bid;

    scanPS((g_odata+start), (g_idata+start), blockSize);
}

#endif // #ifndef _SCAN_BEST_KERNEL_H_
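A hedged host-side launch sketch for computePS from the pair above; the helper name and the assumptions that blockSize is a power of two and that the input length is a multiple of blockSize are mine, and the shared-memory size simply mirrors the (index >> LOG_NUM_BANKS) padding used inside scanPS.

// Hypothetical launcher, not part of the original pair; assumes it sits in the
// same translation unit as computePS/scanPS above, so NUM_BANKS is visible.
void launchBlockwiseScan(int *d_out, int *d_in, int totalN, int blockSize)
{
    int numBlocks = totalN / blockSize;   // one block scans one blockSize-element segment
    int threads   = blockSize / 2;        // scanPS loads two elements per thread (ai and bi)
    // blockSize ints plus one padding slot per NUM_BANKS entries covers the
    // bank-conflict offsets added inside scanPS.
    size_t sharedBytes = (blockSize + blockSize / NUM_BANKS) * sizeof(int);
    computePS<<<numBlocks, threads, sharedBytes>>>(d_out, d_in, blockSize);
}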
05148709660e931848a36544ce09f0fef652cdba.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

// Kernel function to add the elements of two arrays
__global__ void add(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++)
        y[i] = x[i] + y[i];
}

int main(void)
{
    int N = 1<<20;
    float *x, *y;

    // Allocate Unified Memory accessible from CPU or GPU
    hipMallocManaged(&x, N*sizeof(float));
    hipMallocManaged(&y, N*sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Run kernel on 1M elements on the GPU (<<<1, 1>>> specifies thread allocation/ number of threads)
    hipLaunchKernelGGL(( add), dim3(1), dim3(256), 0, 0, N, x, y);

    // Wait for GPU to finish before accessing on host
    hipDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    hipFree(x);
    hipFree(y);

    return 0;
}
05148709660e931848a36544ce09f0fef652cdba.cu
#include <iostream>
#include <math.h>

// Kernel function to add the elements of two arrays
__global__ void add(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++)
        y[i] = x[i] + y[i];
}

int main(void)
{
    int N = 1<<20;
    float *x, *y;

    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Run kernel on 1M elements on the GPU (<<<1, 1>>> specifies thread allocation/ number of threads)
    add<<<1, 256>>>(N, x, y);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
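In the pair above every one of the 256 launched threads walks the whole array, so the work is redundant and concurrent reads and writes of y[i] overlap; a common refinement is a grid-stride loop in which each thread covers a disjoint strided subset. The variant below is a sketch of that pattern and is not part of the original pair.

// Hypothetical grid-stride rewrite of add(), not from the original pair.
__global__ void add_grid_stride(int n, float *x, float *y)
{
    int index  = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)   // each thread handles i, i+stride, i+2*stride, ...
        y[i] = x[i] + y[i];
}
// Launched e.g. as add_grid_stride<<<(N + 255) / 256, 256>>>(N, x, y);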
cc1ea60e2572ca3365711c077f9ba250e994376f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> c, Mon Jun 25 18:24:27 2018 */ #include "magmasparse_internal.h" #define PRECISION_c #define BLOCKSIZE 256 __global__ void magma_ck_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_cbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_c_matrix *D, magma_c_matrix *R, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_cbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD, magma_index_t * rowD, magma_index_t * colD, magmaFloatComplex * valR, magma_index_t * rowR, magma_index_t * colR, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += 
valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = 
zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, magmaFloatComplex * valD4, magma_index_t * rowD4, magma_index_t * colD4, magmaFloatComplex * valR4, magma_index_t * rowR4, magma_index_t * colR4, magmaFloatComplex * valD5, magma_index_t * rowD5, magma_index_t * colD5, magmaFloatComplex * valR5, magma_index_t * rowR5, magma_index_t * colR5, magmaFloatComplex * valD6, magma_index_t * rowD6, magma_index_t * colD6, magmaFloatComplex * valR6, magma_index_t * rowR6, magma_index_t * colR6, magmaFloatComplex * valD7, magma_index_t * rowD7, magma_index_t * colD7, magmaFloatComplex * valR7, magma_index_t * rowR7, magma_index_t * colR7, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = 
rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t 
*colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] 
= local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t 
*colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; 
rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , 
magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, 
magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, magmaFloatComplex *valD32, magma_index_t *rowD32, magma_index_t *colD32, magmaFloatComplex *valR32, magma_index_t *rowR32, magma_index_t *colR32, magmaFloatComplex *valD33, magma_index_t *rowD33, magma_index_t *colD33, magmaFloatComplex *valR33, magma_index_t *rowR33, magma_index_t *colR33, magmaFloatComplex *valD34, magma_index_t *rowD34, magma_index_t *colD34, magmaFloatComplex *valR34, magma_index_t *rowR34, magma_index_t *colR34, magmaFloatComplex *valD35, magma_index_t *rowD35, magma_index_t *colD35, magmaFloatComplex *valR35, magma_index_t *rowR35, magma_index_t *colR35, magmaFloatComplex *valD36, magma_index_t *rowD36, magma_index_t *colD36, magmaFloatComplex *valR36, magma_index_t *rowR36, magma_index_t *colR36, magmaFloatComplex *valD37, magma_index_t *rowD37, magma_index_t *colD37, magmaFloatComplex *valR37, magma_index_t *rowR37, magma_index_t *colR37, magmaFloatComplex *valD38, magma_index_t *rowD38, magma_index_t *colD38, magmaFloatComplex *valR38, magma_index_t *rowR38, magma_index_t *colR38, magmaFloatComplex *valD39, magma_index_t *rowD39, magma_index_t *colD39, magmaFloatComplex *valR39, magma_index_t *rowR39, magma_index_t *colR39, magmaFloatComplex *valD40, magma_index_t *rowD40, magma_index_t *colD40, magmaFloatComplex *valR40, magma_index_t *rowR40, magma_index_t *colR40, magmaFloatComplex *valD41, magma_index_t *rowD41, magma_index_t *colD41, magmaFloatComplex *valR41, magma_index_t *rowR41, magma_index_t *colR41, magmaFloatComplex *valD42, magma_index_t *rowD42, magma_index_t *colD42, magmaFloatComplex *valR42, magma_index_t *rowR42, magma_index_t *colR42, magmaFloatComplex *valD43, magma_index_t *rowD43, magma_index_t *colD43, magmaFloatComplex *valR43, magma_index_t *rowR43, magma_index_t *colR43, magmaFloatComplex *valD44, magma_index_t *rowD44, magma_index_t *colD44, magmaFloatComplex *valR44, magma_index_t *rowR44, magma_index_t *colR44, magmaFloatComplex *valD45, magma_index_t *rowD45, magma_index_t *colD45, magmaFloatComplex *valR45, magma_index_t *rowR45, magma_index_t *colR45, magmaFloatComplex *valD46, magma_index_t *rowD46, magma_index_t *colD46, magmaFloatComplex *valR46, magma_index_t *rowR46, magma_index_t *colR46, magmaFloatComplex *valD47, magma_index_t *rowD47, magma_index_t *colD47, magmaFloatComplex *valR47, magma_index_t *rowR47, magma_index_t *colR47, magmaFloatComplex *valD48, magma_index_t *rowD48, magma_index_t *colD48, magmaFloatComplex *valR48, magma_index_t *rowR48, magma_index_t *colR48, magmaFloatComplex *valD49, magma_index_t *rowD49, magma_index_t *colD49, magmaFloatComplex *valR49, magma_index_t *rowR49, magma_index_t *colR49, magmaFloatComplex *valD50, magma_index_t *rowD50, magma_index_t *colD50, magmaFloatComplex *valR50, magma_index_t *rowR50, magma_index_t *colR50, magmaFloatComplex *valD51, magma_index_t *rowD51, 
magma_index_t *colD51, magmaFloatComplex *valR51, magma_index_t *rowR51, magma_index_t *colR51, magmaFloatComplex *valD52, magma_index_t *rowD52, magma_index_t *colD52, magmaFloatComplex *valR52, magma_index_t *rowR52, magma_index_t *colR52, magmaFloatComplex *valD53, magma_index_t *rowD53, magma_index_t *colD53, magmaFloatComplex *valR53, magma_index_t *rowR53, magma_index_t *colR53, magmaFloatComplex *valD54, magma_index_t *rowD54, magma_index_t *colD54, magmaFloatComplex *valR54, magma_index_t *rowR54, magma_index_t *colR54, magmaFloatComplex *valD55, magma_index_t *rowD55, magma_index_t *colD55, magmaFloatComplex *valR55, magma_index_t *rowR55, magma_index_t *colR55, magmaFloatComplex *valD56, magma_index_t *rowD56, magma_index_t *colD56, magmaFloatComplex *valR56, magma_index_t *rowR56, magma_index_t *colR56, magmaFloatComplex *valD57, magma_index_t *rowD57, magma_index_t *colD57, magmaFloatComplex *valR57, magma_index_t *rowR57, magma_index_t *colR57, magmaFloatComplex *valD58, magma_index_t *rowD58, magma_index_t *colD58, magmaFloatComplex *valR58, magma_index_t *rowR58, magma_index_t *colR58, magmaFloatComplex *valD59, magma_index_t *rowD59, magma_index_t *colD59, magmaFloatComplex *valR59, magma_index_t *rowR59, magma_index_t *colR59, magmaFloatComplex *valD60, magma_index_t *rowD60, magma_index_t *colD60, magmaFloatComplex *valR60, magma_index_t *rowR60, magma_index_t *colR60, magmaFloatComplex *valD61, magma_index_t *rowD61, magma_index_t *colD61, magmaFloatComplex *valR61, magma_index_t *rowR61, magma_index_t *colR61, magmaFloatComplex *valD62, magma_index_t *rowD62, magma_index_t *colD62, magmaFloatComplex *valR62, magma_index_t *rowR62, magma_index_t *colR62, magmaFloatComplex *valD63, magma_index_t *rowD63, magma_index_t *colD63, magmaFloatComplex *valR63, magma_index_t *rowR63, magma_index_t *colR63, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 
) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = 
colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else 
bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_c_matrix* set of matrices with diagonal blocks @param[in] R magma_c_matrix* set of matrices with non-diagonal parts @param[in] b magma_c_matrix RHS @param[in] x magma_c_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_c_matrix *D, magma_c_matrix *R, magma_c_matrix b, magma_c_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel1), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel2), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel4), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, 
D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel8), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel16), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel32), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 
3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel64), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, 
R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, 
queue->cuda_stream() >>>
            //      ( localiters, size, matrices, overlap, D, R, b.dval, x->dval );
        }
        else {
            printf("error: invalid matrix count.\n");
        }
    }
    else {
        printf("error: all elements in diagonal block.\n");
    }

    return MAGMA_SUCCESS;
}
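/*
    Usage sketch (illustrative only; the parameter values below are assumptions,
    not taken from this file). Assuming D[] and R[] already hold the
    diagonal-block / off-diagonal CSR splitting on the device, and b and x are
    device-resident vectors, a call could look like:

        magma_int_t localiters = 4;     // local Jacobi-like sweeps per block
        magma_int_t matrices   = 8;     // number of sub-matrices; the dispatch
                                        // above handles 1, 2, 4, 8, 16, 32, 64
        magma_int_t overlap    = 32;    // size of the overlap between blocks
        magma_cbajac_csr_overlap( localiters, matrices, overlap,
                                  D, R, b, &x, queue );

    Any other matrix count falls through to the "invalid matrix count" branch.
*/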
cc1ea60e2572ca3365711c077f9ba250e994376f.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> c, Mon Jun 25 18:24:27 2018 */ #include "magmasparse_internal.h" #define PRECISION_c #define BLOCKSIZE 256 __global__ void magma_ck_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_cbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_c_matrix *D, magma_c_matrix *R, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_cbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD, magma_index_t * rowD, magma_index_t * colD, magmaFloatComplex * valR, magma_index_t * rowR, magma_index_t * colR, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( 
i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( 
blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, magmaFloatComplex * valD4, magma_index_t * rowD4, magma_index_t * colD4, magmaFloatComplex * valR4, magma_index_t * rowR4, magma_index_t * colR4, magmaFloatComplex * valD5, magma_index_t * rowD5, magma_index_t * colD5, magmaFloatComplex * valR5, magma_index_t * rowR5, magma_index_t * colR5, magmaFloatComplex * valD6, magma_index_t * rowD6, magma_index_t * colD6, magmaFloatComplex * valR6, magma_index_t * rowR6, magma_index_t * colR6, magmaFloatComplex * valD7, magma_index_t * rowD7, magma_index_t * colD7, magmaFloatComplex * valR7, magma_index_t * rowR7, magma_index_t * colR7, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; 
valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, 
magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel32(int 
localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, 
magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = 
valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , 
magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, 
magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, magmaFloatComplex *valD32, magma_index_t *rowD32, magma_index_t *colD32, magmaFloatComplex *valR32, magma_index_t *rowR32, magma_index_t *colR32, magmaFloatComplex *valD33, magma_index_t *rowD33, magma_index_t *colD33, magmaFloatComplex *valR33, magma_index_t *rowR33, magma_index_t *colR33, magmaFloatComplex *valD34, magma_index_t *rowD34, magma_index_t *colD34, magmaFloatComplex *valR34, magma_index_t *rowR34, magma_index_t *colR34, magmaFloatComplex *valD35, magma_index_t *rowD35, magma_index_t *colD35, magmaFloatComplex *valR35, magma_index_t *rowR35, magma_index_t *colR35, magmaFloatComplex *valD36, magma_index_t *rowD36, magma_index_t *colD36, magmaFloatComplex *valR36, magma_index_t *rowR36, magma_index_t *colR36, magmaFloatComplex *valD37, magma_index_t *rowD37, magma_index_t *colD37, magmaFloatComplex *valR37, magma_index_t *rowR37, magma_index_t *colR37, magmaFloatComplex *valD38, magma_index_t *rowD38, magma_index_t *colD38, magmaFloatComplex *valR38, magma_index_t *rowR38, magma_index_t *colR38, magmaFloatComplex *valD39, magma_index_t *rowD39, magma_index_t *colD39, magmaFloatComplex *valR39, magma_index_t *rowR39, magma_index_t *colR39, magmaFloatComplex *valD40, magma_index_t *rowD40, magma_index_t *colD40, magmaFloatComplex *valR40, magma_index_t *rowR40, magma_index_t *colR40, magmaFloatComplex *valD41, magma_index_t *rowD41, magma_index_t *colD41, magmaFloatComplex *valR41, magma_index_t *rowR41, magma_index_t *colR41, magmaFloatComplex *valD42, magma_index_t *rowD42, magma_index_t *colD42, magmaFloatComplex *valR42, magma_index_t *rowR42, magma_index_t *colR42, magmaFloatComplex *valD43, magma_index_t *rowD43, magma_index_t *colD43, magmaFloatComplex *valR43, magma_index_t *rowR43, magma_index_t *colR43, magmaFloatComplex *valD44, magma_index_t *rowD44, magma_index_t *colD44, magmaFloatComplex *valR44, magma_index_t *rowR44, magma_index_t *colR44, magmaFloatComplex *valD45, magma_index_t *rowD45, magma_index_t *colD45, magmaFloatComplex *valR45, magma_index_t *rowR45, magma_index_t *colR45, magmaFloatComplex *valD46, magma_index_t *rowD46, magma_index_t *colD46, magmaFloatComplex *valR46, magma_index_t *rowR46, magma_index_t *colR46, magmaFloatComplex *valD47, magma_index_t *rowD47, magma_index_t *colD47, magmaFloatComplex *valR47, magma_index_t *rowR47, magma_index_t *colR47, magmaFloatComplex *valD48, magma_index_t *rowD48, magma_index_t *colD48, magmaFloatComplex *valR48, magma_index_t *rowR48, magma_index_t *colR48, magmaFloatComplex *valD49, magma_index_t *rowD49, magma_index_t *colD49, magmaFloatComplex *valR49, magma_index_t *rowR49, magma_index_t *colR49, magmaFloatComplex *valD50, magma_index_t *rowD50, magma_index_t *colD50, magmaFloatComplex *valR50, magma_index_t *rowR50, magma_index_t *colR50, magmaFloatComplex *valD51, magma_index_t *rowD51, magma_index_t *colD51, magmaFloatComplex *valR51, magma_index_t *rowR51, 
magma_index_t *colR51, magmaFloatComplex *valD52, magma_index_t *rowD52, magma_index_t *colD52, magmaFloatComplex *valR52, magma_index_t *rowR52, magma_index_t *colR52, magmaFloatComplex *valD53, magma_index_t *rowD53, magma_index_t *colD53, magmaFloatComplex *valR53, magma_index_t *rowR53, magma_index_t *colR53, magmaFloatComplex *valD54, magma_index_t *rowD54, magma_index_t *colD54, magmaFloatComplex *valR54, magma_index_t *rowR54, magma_index_t *colR54, magmaFloatComplex *valD55, magma_index_t *rowD55, magma_index_t *colD55, magmaFloatComplex *valR55, magma_index_t *rowR55, magma_index_t *colR55, magmaFloatComplex *valD56, magma_index_t *rowD56, magma_index_t *colD56, magmaFloatComplex *valR56, magma_index_t *rowR56, magma_index_t *colR56, magmaFloatComplex *valD57, magma_index_t *rowD57, magma_index_t *colD57, magmaFloatComplex *valR57, magma_index_t *rowR57, magma_index_t *colR57, magmaFloatComplex *valD58, magma_index_t *rowD58, magma_index_t *colD58, magmaFloatComplex *valR58, magma_index_t *rowR58, magma_index_t *colR58, magmaFloatComplex *valD59, magma_index_t *rowD59, magma_index_t *colD59, magmaFloatComplex *valR59, magma_index_t *rowR59, magma_index_t *colR59, magmaFloatComplex *valD60, magma_index_t *rowD60, magma_index_t *colD60, magmaFloatComplex *valR60, magma_index_t *rowR60, magma_index_t *colR60, magmaFloatComplex *valD61, magma_index_t *rowD61, magma_index_t *colD61, magmaFloatComplex *valR61, magma_index_t *rowR61, magma_index_t *colR61, magmaFloatComplex *valD62, magma_index_t *rowD62, magma_index_t *colD62, magmaFloatComplex *valR62, magma_index_t *rowR62, magma_index_t *colR62, magmaFloatComplex *valD63, magma_index_t *rowD63, magma_index_t *colD63, magmaFloatComplex *valR63, magma_index_t *rowR63, magma_index_t *colR63, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = 
colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( 
blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( 
i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_c_matrix* set of matrices with diagonal blocks @param[in] R magma_c_matrix* set of matrices with non-diagonal parts @param[in] b magma_c_matrix RHS @param[in] x magma_c_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_c_matrix *D, magma_c_matrix *R, magma_c_matrix b, magma_c_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel1<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel2<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel4<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); 
//magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel8<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel16<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel32<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 
7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel64<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, 
D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else { printf("error: invalid matrix count.\n"); } } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
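A hypothetical aside, not part of the file above: the docstring documents magma_cbajac_csr_overlap as the host entry point, so a minimal host-side usage sketch follows. It only exercises the documented signature; building the overlapping D[]/R[] splitting, b and x is assumed to have happened elsewhere, and localiters/overlap are placeholder values.

// Hypothetical usage sketch (not from the MAGMA sources).
#include "magmasparse_internal.h"

magma_int_t run_overlap_sweep(
    magma_int_t matrices,      // 1, 2, 4, 8, 16, 32 or 64, per the dispatch above
    magma_c_matrix *D,         // diagonal blocks, length `matrices`
    magma_c_matrix *R,         // non-diagonal parts, length `matrices`
    magma_c_matrix b,          // right-hand side (device memory)
    magma_c_matrix *x,         // iterate / solution, updated in place
    magma_queue_t queue)
{
    magma_int_t localiters = 4;     // local Jacobi-like updates per block (placeholder)
    magma_int_t overlap    = 128;   // placeholder: half the 256-thread block used above;
                                    // must match how the D/R splitting was built
    // One asynchronous block-Jacobi sweep with restricted additive Schwarz overlap.
    return magma_cbajac_csr_overlap(localiters, matrices, overlap,
                                    D, R, b, x, queue);
}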
d2867d4892a06e25ae8cc9efe27da5171a181233.hip
// !!! This is a file automatically generated by hipify!!! ///////////////////////////////////////////////// DO NOT CHANGE /////////////////////////////////////// #include "ex1.h" #define SQR(a) ((a) * (a)) long long int distance_sqr_between_image_arrays(uchar *img_arr1, uchar *img_arr2) { long long int distance_sqr = 0; for (int i = 0; i < N_IMAGES * IMG_WIDTH * IMG_HEIGHT; i++) { distance_sqr += SQR(img_arr1[i] - img_arr2[i]); } return distance_sqr; } int randomize_images(uchar *images) { FILE *urandom = fopen("/dev/urandom", "r"); if (!urandom) { perror("Unable to open /dev/urandom"); return 1; } int ret = fread(images, 1, N_IMAGES * IMG_WIDTH * IMG_HEIGHT, urandom); if (ret < N_IMAGES * IMG_HEIGHT * IMG_WIDTH) { perror("Unable to read random data"); fclose(urandom); return 1; } fclose(urandom); return 0; } int main() { uchar *images_in; uchar *images_out_cpu; //output of CPU computation. In CPU memory. uchar *images_out_gpu_serial; //output of GPU task serial computation. In CPU memory. uchar *images_out_gpu_bulk; //output of GPU bulk computation. In CPU memory. int devices; CUDA_CHECK( hipGetDeviceCount(&devices) ); printf("Number of devices: %d\n", devices); CUDA_CHECK( hipHostMalloc(&images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); CUDA_CHECK( hipHostMalloc(&images_out_cpu, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); CUDA_CHECK( hipHostMalloc(&images_out_gpu_serial, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); CUDA_CHECK( hipHostMalloc(&images_out_gpu_bulk, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); double t_start, t_finish; /* instead of loading real images, we'll load the arrays with random data */ printf("\n=== Randomizing images ===\n"); t_start = get_time_msec(); if (randomize_images(images_in)) return 1; t_finish = get_time_msec(); printf("total time %f [msec]\n", t_finish - t_start); // CPU computation. For reference. Do not change printf("\n=== CPU ===\n"); t_start = get_time_msec(); for (int i = 0; i < N_IMAGES; i++) { uchar *img_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT]; uchar *img_out = &images_out_cpu[i * IMG_WIDTH * IMG_HEIGHT]; cpu_process(img_in, img_out, IMG_WIDTH, IMG_HEIGHT); } t_finish = get_time_msec(); printf("total time %f [msec]\n", t_finish - t_start); long long int distance_sqr; // GPU task serial computation printf("\n=== GPU Task Serial ===\n"); struct task_serial_context *ts_context = task_serial_init(); t_start = get_time_msec(); task_serial_process(ts_context, images_in, images_out_gpu_serial); t_finish = get_time_msec(); distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_serial); printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); task_serial_free(ts_context); // GPU bulk printf("\n=== GPU Bulk ===\n"); struct gpu_bulk_context *gb_context = gpu_bulk_init(); t_start = get_time_msec(); gpu_bulk_process(gb_context, images_in, images_out_gpu_bulk); t_finish = get_time_msec(); distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_bulk); printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); gpu_bulk_free(gb_context); return 0; }
d2867d4892a06e25ae8cc9efe27da5171a181233.cu
///////////////////////////////////////////////// DO NOT CHANGE /////////////////////////////////////// #include "ex1.h" #define SQR(a) ((a) * (a)) long long int distance_sqr_between_image_arrays(uchar *img_arr1, uchar *img_arr2) { long long int distance_sqr = 0; for (int i = 0; i < N_IMAGES * IMG_WIDTH * IMG_HEIGHT; i++) { distance_sqr += SQR(img_arr1[i] - img_arr2[i]); } return distance_sqr; } int randomize_images(uchar *images) { FILE *urandom = fopen("/dev/urandom", "r"); if (!urandom) { perror("Unable to open /dev/urandom"); return 1; } int ret = fread(images, 1, N_IMAGES * IMG_WIDTH * IMG_HEIGHT, urandom); if (ret < N_IMAGES * IMG_HEIGHT * IMG_WIDTH) { perror("Unable to read random data"); fclose(urandom); return 1; } fclose(urandom); return 0; } int main() { uchar *images_in; uchar *images_out_cpu; //output of CPU computation. In CPU memory. uchar *images_out_gpu_serial; //output of GPU task serial computation. In CPU memory. uchar *images_out_gpu_bulk; //output of GPU bulk computation. In CPU memory. int devices; CUDA_CHECK( cudaGetDeviceCount(&devices) ); printf("Number of devices: %d\n", devices); CUDA_CHECK( cudaHostAlloc(&images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); CUDA_CHECK( cudaHostAlloc(&images_out_cpu, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); CUDA_CHECK( cudaHostAlloc(&images_out_gpu_serial, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); CUDA_CHECK( cudaHostAlloc(&images_out_gpu_bulk, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) ); double t_start, t_finish; /* instead of loading real images, we'll load the arrays with random data */ printf("\n=== Randomizing images ===\n"); t_start = get_time_msec(); if (randomize_images(images_in)) return 1; t_finish = get_time_msec(); printf("total time %f [msec]\n", t_finish - t_start); // CPU computation. For reference. Do not change printf("\n=== CPU ===\n"); t_start = get_time_msec(); for (int i = 0; i < N_IMAGES; i++) { uchar *img_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT]; uchar *img_out = &images_out_cpu[i * IMG_WIDTH * IMG_HEIGHT]; cpu_process(img_in, img_out, IMG_WIDTH, IMG_HEIGHT); } t_finish = get_time_msec(); printf("total time %f [msec]\n", t_finish - t_start); long long int distance_sqr; // GPU task serial computation printf("\n=== GPU Task Serial ===\n"); struct task_serial_context *ts_context = task_serial_init(); t_start = get_time_msec(); task_serial_process(ts_context, images_in, images_out_gpu_serial); t_finish = get_time_msec(); distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_serial); printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); task_serial_free(ts_context); // GPU bulk printf("\n=== GPU Bulk ===\n"); struct gpu_bulk_context *gb_context = gpu_bulk_init(); t_start = get_time_msec(); gpu_bulk_process(gb_context, images_in, images_out_gpu_bulk); t_finish = get_time_msec(); distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_bulk); printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); gpu_bulk_free(gb_context); return 0; }
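distance_sqr_between_image_arrays above walks every pixel on the CPU to compare each GPU result against the baseline. Purely as a hedged illustration (this is not part of ex1.h), the same check can be expressed as a grid-stride CUDA kernel that accumulates per-thread partial sums with atomicAdd; the array size below is a stand-in for N_IMAGES * IMG_WIDTH * IMG_HEIGHT, and managed memory is used only to keep the sketch short:

#include <cstdio>
#include <cuda_runtime.h>

typedef unsigned char uchar;

__global__ void distance_sqr_kernel(const uchar *a, const uchar *b, long long n,
                                    unsigned long long *out) {
    long long i = blockIdx.x * (long long)blockDim.x + threadIdx.x;
    long long stride = (long long)blockDim.x * gridDim.x;
    unsigned long long local = 0;
    for (; i < n; i += stride) {            // grid-stride loop over all pixels
        int d = (int)a[i] - (int)b[i];
        local += (unsigned long long)(d * d);
    }
    atomicAdd(out, local);                  // one atomic per thread keeps the sketch simple
}

int main() {
    const long long n = 1 << 20;            // stands in for N_IMAGES * IMG_WIDTH * IMG_HEIGHT
    uchar *a, *b;
    unsigned long long *sum;
    cudaMallocManaged((void **)&a, n);
    cudaMallocManaged((void **)&b, n);
    cudaMallocManaged((void **)&sum, sizeof(*sum));
    for (long long i = 0; i < n; i++) { a[i] = (uchar)i; b[i] = (uchar)i; }
    b[0] = (uchar)(a[0] + 5);               // inject one known difference
    *sum = 0;
    distance_sqr_kernel<<<128, 256>>>(a, b, n, sum);
    cudaDeviceSynchronize();
    printf("distance_sqr = %llu (expected 25)\n", *sum);
    cudaFree(a); cudaFree(b); cudaFree(sum);
    return 0;
}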
9bb80d785b9275754d2df89ae81dd1bb2d8be309.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <getopt.h> #include "utils.h" #define MAX_ITERS 100 #define MAX_SKIP 10 #define THREADS 1024 #define BLOCKS 8 #define MAX_MSG_SIZE 64 * 1024 #define UNROLL 2 __global__ void bw(volatile double *data_d, volatile unsigned int *counter_d, int len, int pe, int iter, int skip, double *bw_result) { int u, i, j, peer, tid, slice; unsigned int counter; long long int start = 0, stop = 0; double time = 0; int threads = gridDim.x * blockDim.x; tid = blockIdx.x * blockDim.x + threadIdx.x; peer = !pe; slice = UNROLL * threads; for (i = 0; i < (iter + skip); i++) { if (i == skip){ nvshmem_quiet(); start = clock64(); } for (j = 0; j < len - slice; j += slice) { for (u = 0; u < UNROLL; ++u) { int idx = j + u * threads + tid; *(data_d + idx) = nvshmem_double_g((double *)data_d + idx, peer); } __syncthreads(); /* This is required for performance over PCIe. PCIe has a P2P mailbox protocol that has a window of 64KB for device BAR addresses. Not synchronizing across threads will lead to jumping in and out of the 64K window */ } for (u = 0; u < UNROLL; ++u) { int idx = j + u * threads + tid; if (idx < len) *(data_d + idx) = nvshmem_double_g((double *)data_d + idx, peer); } // synchronizing across blocks __syncthreads(); if (!threadIdx.x) { __threadfence(); /* To ensure that the data received through shmem_g is visible across the gpu */ counter = atomicInc((unsigned int *)counter_d, UINT_MAX); if (counter == (gridDim.x * (i + 1) - 1)) { *(counter_d + 1) += 1; } while (*(counter_d + 1) != i + 1) ; } __syncthreads(); } // synchronize and call nvshmem_quiet across blocks __syncthreads(); if (!threadIdx.x) { __threadfence(); counter = atomicInc((unsigned int *)counter_d, UINT_MAX); if (counter == (gridDim.x * (i + 1) - 1)) { *(counter_d + 1) += 1; } while (*(counter_d + 1) != i + 1) ; } __syncthreads(); stop = clock64(); time = (stop - start); if (!threadIdx.x && !blockIdx.x) { *bw_result = ((float)iter * (float)len * sizeof(double) * clockrate) / ((time / 1000) * 1024 * 1024 * 1024); } } int main(int argc, char *argv[]) { int mype, npes; double *data_d = NULL; unsigned int *counter_d; int max_blocks = BLOCKS, max_threads = THREADS; int array_size, i; void **h_tables; uint64_t *h_size_arr; double *h_bw; int iter = MAX_ITERS; int skip = MAX_SKIP; int max_msg_size = MAX_MSG_SIZE; init_wrapper(&argc, &argv); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); if (npes != 2) { fprintf(stderr, "This test requires exactly two processes \n"); goto finalize; } while (1) { int c; c = getopt(argc, argv, "c:t:h"); if (c == -1) break; switch (c) { case 'c': max_blocks = strtol(optarg, NULL, 0); break; case 't': max_threads = strtol(optarg, NULL, 0); break; default: case 'h': printf("-c [CTAs] -t [THREADS] \n"); goto finalize; } } array_size = floor(log2((float)max_msg_size)) + 1; alloc_tables(&h_tables, 2, array_size); h_size_arr = (uint64_t *)h_tables[0]; h_bw = (double *)h_tables[1]; data_d = 
(double *)nvshmem_malloc(max_msg_size); CUDA_CHECK(hipMemset(data_d, 0, max_msg_size)); CUDA_CHECK(hipMalloc((void **)&counter_d, sizeof(unsigned int) * 2)); CUDA_CHECK(hipMemset(counter_d, 0, sizeof(unsigned int) * 2)); CUDA_CHECK(hipDeviceSynchronize()); int size; i = 0; if (mype == 0) { for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) { int blocks = max_blocks, threads = max_threads; h_size_arr[i] = size; CUDA_CHECK(hipMemset(counter_d, 0, sizeof(unsigned int) * 2)); hipLaunchKernelGGL(( bw), dim3(blocks), dim3(threads), 0, 0, data_d, counter_d, size / sizeof(double), mype, iter, skip, &h_bw[i]); CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipDeviceSynchronize()); nvshmem_barrier_all(); i++; } } else { for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) { nvshmem_barrier_all(); } } if (mype == 0) { print_table("shmem_g_bw", "None", "size (Bytes)", "BW", "GB/sec", '+', h_size_arr, h_bw, i); } finalize: if (data_d) nvshmem_free(data_d); free_tables(h_tables, 2); finalize_wrapper(); return 0; }
9bb80d785b9275754d2df89ae81dd1bb2d8be309.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> #include <getopt.h> #include "utils.h" #define MAX_ITERS 100 #define MAX_SKIP 10 #define THREADS 1024 #define BLOCKS 8 #define MAX_MSG_SIZE 64 * 1024 #define UNROLL 2 __global__ void bw(volatile double *data_d, volatile unsigned int *counter_d, int len, int pe, int iter, int skip, double *bw_result) { int u, i, j, peer, tid, slice; unsigned int counter; long long int start = 0, stop = 0; double time = 0; int threads = gridDim.x * blockDim.x; tid = blockIdx.x * blockDim.x + threadIdx.x; peer = !pe; slice = UNROLL * threads; for (i = 0; i < (iter + skip); i++) { if (i == skip){ nvshmem_quiet(); start = clock64(); } for (j = 0; j < len - slice; j += slice) { for (u = 0; u < UNROLL; ++u) { int idx = j + u * threads + tid; *(data_d + idx) = nvshmem_double_g((double *)data_d + idx, peer); } __syncthreads(); /* This is required for performance over PCIe. PCIe has a P2P mailbox protocol that has a window of 64KB for device BAR addresses. Not synchronizing across threads will lead to jumping in and out of the 64K window */ } for (u = 0; u < UNROLL; ++u) { int idx = j + u * threads + tid; if (idx < len) *(data_d + idx) = nvshmem_double_g((double *)data_d + idx, peer); } // synchronizing across blocks __syncthreads(); if (!threadIdx.x) { __threadfence(); /* To ensure that the data received through shmem_g is visible across the gpu */ counter = atomicInc((unsigned int *)counter_d, UINT_MAX); if (counter == (gridDim.x * (i + 1) - 1)) { *(counter_d + 1) += 1; } while (*(counter_d + 1) != i + 1) ; } __syncthreads(); } // synchronize and call nvshmem_quiet across blocks __syncthreads(); if (!threadIdx.x) { __threadfence(); counter = atomicInc((unsigned int *)counter_d, UINT_MAX); if (counter == (gridDim.x * (i + 1) - 1)) { *(counter_d + 1) += 1; } while (*(counter_d + 1) != i + 1) ; } __syncthreads(); stop = clock64(); time = (stop - start); if (!threadIdx.x && !blockIdx.x) { *bw_result = ((float)iter * (float)len * sizeof(double) * clockrate) / ((time / 1000) * 1024 * 1024 * 1024); } } int main(int argc, char *argv[]) { int mype, npes; double *data_d = NULL; unsigned int *counter_d; int max_blocks = BLOCKS, max_threads = THREADS; int array_size, i; void **h_tables; uint64_t *h_size_arr; double *h_bw; int iter = MAX_ITERS; int skip = MAX_SKIP; int max_msg_size = MAX_MSG_SIZE; init_wrapper(&argc, &argv); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); if (npes != 2) { fprintf(stderr, "This test requires exactly two processes \n"); goto finalize; } while (1) { int c; c = getopt(argc, argv, "c:t:h"); if (c == -1) break; switch (c) { case 'c': max_blocks = strtol(optarg, NULL, 0); break; case 't': max_threads = strtol(optarg, NULL, 0); break; default: case 'h': printf("-c [CTAs] -t [THREADS] \n"); goto finalize; } } array_size = floor(log2((float)max_msg_size)) + 1; alloc_tables(&h_tables, 2, array_size); h_size_arr = (uint64_t *)h_tables[0]; h_bw = (double *)h_tables[1]; data_d = (double *)nvshmem_malloc(max_msg_size); CUDA_CHECK(cudaMemset(data_d, 0, 
max_msg_size)); CUDA_CHECK(cudaMalloc((void **)&counter_d, sizeof(unsigned int) * 2)); CUDA_CHECK(cudaMemset(counter_d, 0, sizeof(unsigned int) * 2)); CUDA_CHECK(cudaDeviceSynchronize()); int size; i = 0; if (mype == 0) { for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) { int blocks = max_blocks, threads = max_threads; h_size_arr[i] = size; CUDA_CHECK(cudaMemset(counter_d, 0, sizeof(unsigned int) * 2)); bw<<<blocks, threads>>>(data_d, counter_d, size / sizeof(double), mype, iter, skip, &h_bw[i]); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaDeviceSynchronize()); nvshmem_barrier_all(); i++; } } else { for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) { nvshmem_barrier_all(); } } if (mype == 0) { print_table("shmem_g_bw", "None", "size (Bytes)", "BW", "GB/sec", '+', h_size_arr, h_bw, i); } finalize: if (data_d) nvshmem_free(data_d); free_tables(h_tables, 2); finalize_wrapper(); return 0; }
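The bw kernel above rolls its own grid-wide barrier: thread 0 of every block bumps counter_d[0] with atomicInc, the last arrival of a round advances counter_d[1], and the block leaders spin on it. Below is a hedged, nvshmem-free sketch of that counter protocol. Like the benchmark itself, it silently assumes that every block is resident on the GPU at the same time (cooperative groups and grid-wide sync are the supported way to obtain that guarantee), so treat it as an illustration of the pattern rather than a general-purpose barrier:

#include <cstdio>
#include <climits>
#include <cuda_runtime.h>

__global__ void rounds(unsigned int *counter_d, int *data, int n, int iters) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = 0; i < iters; i++) {
        if (tid < n) data[tid] += 1;               // this round's work
        __syncthreads();
        if (threadIdx.x == 0) {
            __threadfence();                        // publish this block's writes
            unsigned int c = atomicInc(counter_d, UINT_MAX);
            if (c == gridDim.x * (i + 1) - 1)       // last block leader to arrive in round i
                atomicAdd(counter_d + 1, 1);        // announce "round i complete"
            while (((volatile unsigned int *)counter_d)[1] != (unsigned int)(i + 1))
                ;                                   // spin until every block has arrived
        }
        __syncthreads();
    }
}

int main() {
    const int n = 4096, iters = 4, threads = 256;
    const int blocks = (n + threads - 1) / threads; // 16 blocks; they must all fit on the GPU at once
    unsigned int *counter_d;
    int *data;
    cudaMalloc((void **)&counter_d, 2 * sizeof(unsigned int));
    cudaMemset(counter_d, 0, 2 * sizeof(unsigned int));
    cudaMalloc((void **)&data, n * sizeof(int));
    cudaMemset(data, 0, n * sizeof(int));
    rounds<<<blocks, threads>>>(counter_d, data, n, iters);
    cudaDeviceSynchronize();
    int first;
    cudaMemcpy(&first, data, sizeof(int), cudaMemcpyDeviceToHost);
    printf("data[0] = %d (expected %d)\n", first, iters);
    cudaFree(counter_d); cudaFree(data);
    return 0;
}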
a863a47363f7b584994f24ca9eb70270b0705cfd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* test.cu */ # include <stdio.h> # include <string.h> # include "test_conf.h" # include "test_cpu.h" # include "test_kernel.cu" /* ================ Procedure_gpu =================== */ //* hote cpu void procedure_cpu_gpu(int size) { int *T, *T_device, *T_aux; int i, block_number; T = (int*) malloc(size*sizeof(int)); T_aux = (int*) malloc(size*sizeof(int)); for (i=0; i<size; i++) T[i] = i; hipMalloc( (void**) &T_device, size*sizeof(int) ); hipMemcpy( T_device, T, size*sizeof(int), hipMemcpyHostToDevice ); // appel du kernel ! hipMemcpy( T_aux, T_device, size*sizeof(int), hipMemcpyDeviceToHost ); printf("T_aux = [ "); display_tab(T_aux, size); printf("]\n"); /*kernel_1<<< 1, 1 >>> (T_device); hipMemcpy( T_aux, T_device, size*sizeof(int), hipMemcpyDeviceToHost ); printf("T_aux = [ "); display_tab(T_aux, size); printf("]\n"); */ block_number = size/BLOCK_SIZE; if ( (size % BLOCK_SIZE) != 0 ) block_number++; //10000 elements //256 threads per block //39+1 blocks //40*256 = 10240 threads hipLaunchKernelGGL(( inc_gpu), dim3(block_number), dim3(BLOCK_SIZE) , 0, 0, T_device, size); hipMemcpy( T_aux, T_device, size*sizeof(int), hipMemcpyDeviceToHost ); printf("T_aux = [ "); display_tab(T_aux, size); printf("]\n"); hipFree(T_device); free(T); free(T_aux); } /* ===================== Main ======================== */ int main (int argc, char* argv[]) { int size = atoi(argv[1]); // procedure_cpu(size); // procedure_cpu2(size); procedure_cpu_gpu(size); }
a863a47363f7b584994f24ca9eb70270b0705cfd.cu
/* test.cu */ # include <stdio.h> # include <string.h> # include "test_conf.h" # include "test_cpu.h" # include "test_kernel.cu" /* ================ Procedure_gpu =================== */ //* hote cpu void procedure_cpu_gpu(int size) { int *T, *T_device, *T_aux; int i, block_number; T = (int*) malloc(size*sizeof(int)); T_aux = (int*) malloc(size*sizeof(int)); for (i=0; i<size; i++) T[i] = i; cudaMalloc( (void**) &T_device, size*sizeof(int) ); cudaMemcpy( T_device, T, size*sizeof(int), cudaMemcpyHostToDevice ); // appel du kernel ! cudaMemcpy( T_aux, T_device, size*sizeof(int), cudaMemcpyDeviceToHost ); printf("T_aux = [ "); display_tab(T_aux, size); printf("]\n"); /*kernel_1<<< 1, 1 >>> (T_device); cudaMemcpy( T_aux, T_device, size*sizeof(int), cudaMemcpyDeviceToHost ); printf("T_aux = [ "); display_tab(T_aux, size); printf("]\n"); */ block_number = size/BLOCK_SIZE; if ( (size % BLOCK_SIZE) != 0 ) block_number++; //10000 elements //256 threads per block //39+1 blocks //40*256 = 10240 threads inc_gpu<<< block_number, BLOCK_SIZE >>> (T_device, size); cudaMemcpy( T_aux, T_device, size*sizeof(int), cudaMemcpyDeviceToHost ); printf("T_aux = [ "); display_tab(T_aux, size); printf("]\n"); cudaFree(T_device); free(T); free(T_aux); } /* ===================== Main ======================== */ int main (int argc, char* argv[]) { int size = atoi(argv[1]); // procedure_cpu(size); // procedure_cpu2(size); procedure_cpu_gpu(size); }
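test_conf.h and test_kernel.cu are not shown in this listing, so BLOCK_SIZE and inc_gpu below are assumptions. Based on how procedure_cpu_gpu launches it (one thread per element, "39+1" blocks of 256 threads for 10000 elements), a plausible, hedged sketch of the missing kernel together with a standalone driver:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define BLOCK_SIZE 256   // assumed value of the constant from test_conf.h

__global__ void inc_gpu(int *T, int size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size)        // the last block is partially outside the array
        T[i] = T[i] + 1;
}

int main() {
    const int size = 10000;                       // the element count from the comments
    int *T = (int *)malloc(size * sizeof(int));
    for (int i = 0; i < size; i++) T[i] = i;
    int *T_device;
    cudaMalloc((void **)&T_device, size * sizeof(int));
    cudaMemcpy(T_device, T, size * sizeof(int), cudaMemcpyHostToDevice);
    int block_number = size / BLOCK_SIZE;
    if (size % BLOCK_SIZE != 0) block_number++;   // 39 + 1 blocks for 10000 / 256
    inc_gpu<<<block_number, BLOCK_SIZE>>>(T_device, size);
    cudaMemcpy(T, T_device, size * sizeof(int), cudaMemcpyDeviceToHost);
    printf("T[0]=%d T[%d]=%d\n", T[0], size - 1, T[size - 1]);  // expect 1 and 10000
    cudaFree(T_device);
    free(T);
    return 0;
}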
19e61d94890b45ed7932a22d81cdd1bde497814b.hip
// !!! This is a file automatically generated by hipify!!! /*https://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#hetr-cuda-prog-cuda-streams*/ #include <stdio.h> #include <time.h> #include <hip/hip_runtime.h> //#define sizeOfArray 1024*1024 // 1. Execute Everything synchronously // 2. Execute Everything asynchronously // 3. Execute Memcpy Synchronously and kernel launch Asynchronously // 4. Execute Memcpy asynchronously and kernel launch synchronously // ** Measure variability across the above by chaning data size ** // ** Utilize Multiple Streams like in the example: https://devblogs.nvidia.com/gpu-pro-tip-cuda-7-streams-simplify-concurrency/ ** // Put global variables here int REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, *MAX_THREADS_DIM; size_t TOTAL_GLOBAL_MEM, TOTAL_CONST_MEM; __global__ void arrayAddition(int *device_a, int *device_b, int *device_result, int sizeOfArray) { int threadId = threadIdx.x + blockIdx.x * blockDim.x ; if (threadId < sizeOfArray) device_result[threadId]= device_a[threadId]+device_b[threadId]; } /* *checkCuda: will check to see if there is an error returned by CUDA runtime */ inline void checkCuda(hipError_t errMsg, const char* errContext) { if(errMsg != hipSuccess) { fprintf(stderr, "CUDA Runtime Error From %s: %s\n", errContext, hipGetErrorString(errMsg)); exit(EXIT_FAILURE); } } /* * getCUDAInfo() - originally named "getHardwareContraints in module 3 * - this function will get CUDA information pertaining to the hardware * on which we are operating... the code can then reason on these reports to determine * the best way to structure memory transfers between the host and device */ void getCUDAInfo() { //=============================Gets number of cuda devices=========================================== int deviceCount = 0; checkCuda(hipGetDeviceCount(&deviceCount), "Failed deviceCount load"); // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0) { printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); } //=============================Gets number of cuda devices=========================================== // for each device found, store this device in some type of object int device; for (device = 0; device < deviceCount; device++) { // Sets the context of the device so that we know which device we are working with if there // are multiple hipSetDevice(device); hipDeviceProp_t deviceProp; // gets the "properties" struct that stores the properties of a device // from this property struct, we can query the limitations of this device hipGetDeviceProperties(&deviceProp, device); printf("\nDevice: %d \"%s\"\n===========================================\n", device, deviceProp.name); TOTAL_GLOBAL_MEM = deviceProp.totalGlobalMem; REGS_PER_BLOCK = deviceProp.regsPerBlock; WARP_SIZE = deviceProp.warpSize; MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock; MAX_THREADS_DIM = deviceProp.maxThreadsDim; TOTAL_CONST_MEM = deviceProp.totalConstMem; printf("The %s has:\n\t-%zu total bytes of global memory\n\t-%zu bytes of constant memory\n\t-%d registers per block\n\t-%d threads per warp\n\t-A maximum of %d threads per block\n\t-A maximum thread dimension of %d x %d x %d\n", deviceProp.name, TOTAL_GLOBAL_MEM,TOTAL_CONST_MEM, REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, MAX_THREADS_DIM[0], MAX_THREADS_DIM[1], MAX_THREADS_DIM[2]); // What I think we care about: // 1. totalGlobalMem // 2. regsPerBlock // 3. warpSize (i.e. 
numThreadsPerBlock (is this equal to regsPerBlock??) // 4. maxThreadsperBlock // 5. maxThreadsDim[3] } } bool verifyResult(int *host_a, int *host_b, int **host_result, int num_streams, int sizeOfArray) { bool testPassed = true; for(int i = 0; i < num_streams; i++) { for(int j = 0; j < sizeOfArray; j++) { if((host_a[j] + host_b[j]) != host_result[i][j]) { testPassed = false; printf("Case %d Failed on Stream %d, iteration %d: %d + %d = Serial Result: %d ParallelResult: %d\n", 0,i, j, host_a[j], host_b[j], host_a[j] + host_b[j], host_result[i][j]); } } } return testPassed; } float testStream(int **device_a, int **device_b, int **device_result, int *host_a, int *host_b, int **host_result, hipStream_t *streams, hipEvent_t start, hipEvent_t stop, int num_streams, int sizeOfArray,int sync_test, int gridSize, int blockSize) { float elapsedTime = 0; for(int i = 0; i < num_streams; i++) { // Create new stream on each iteration checkCuda(hipStreamCreate(&streams[i]), "hipStreamCreate"); // Allocate device memory for each iteration hipMalloc( ( void**)& device_a[i], sizeOfArray * sizeof ( **device_a ) ); hipMalloc( ( void**)& device_b[i],sizeOfArray * sizeof ( **device_b ) ); hipMalloc( ( void**)& device_result[i], sizeOfArray * sizeof ( **device_result ) ); hipEventRecord(start,0); switch(sync_test) { // Synchronous memcpy and kernel launch case 0: checkCuda(hipMemcpy(device_a[i], host_a,sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice), "host to device async memcpy 1"); checkCuda(hipMemcpy(device_b[i], host_b, sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice), "host to device async memcpy 2"); hipLaunchKernelGGL(( arrayAddition) , dim3(gridSize), dim3(blockSize), blockSize * sizeof(int), 0, device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(hipMemcpy(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), hipMemcpyDeviceToHost), "device to host async memcpy"); break; // Asynchronous memcpy and synchronous kernel launch case 1: checkCuda(hipMemcpyAsync(device_a[i], host_a,sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice, streams[i]), "host to device async memcpy 1"); checkCuda(hipMemcpyAsync(device_b[i], host_b, sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice, streams[i]), "host to device async memcpy 2"); hipLaunchKernelGGL(( arrayAddition) , dim3(gridSize), dim3(blockSize), blockSize * sizeof(int), 0, device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(hipMemcpyAsync(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), hipMemcpyDeviceToHost, streams[i]), "device to host async memcpy"); break; // Synchronous memcpy and asynchronous kernel launch case 2: checkCuda(hipMemcpy(device_a[i], host_a,sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice), "host to device async memcpy 1"); checkCuda(hipMemcpy(device_b[i], host_b, sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice), "host to device async memcpy 2"); hipLaunchKernelGGL(( arrayAddition) , dim3(gridSize), dim3(blockSize), blockSize * sizeof(int), streams[i], device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(hipMemcpy(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), hipMemcpyDeviceToHost), "device to host async memcpy"); break; case 3: checkCuda(hipMemcpyAsync(device_a[i], host_a,sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice, streams[i]), "host to device async memcpy 1"); checkCuda(hipMemcpyAsync(device_b[i], host_b, sizeOfArray * sizeof ( int ), hipMemcpyHostToDevice, streams[i]), "host to device async memcpy 2"); hipLaunchKernelGGL(( 
arrayAddition) , dim3(gridSize), dim3(blockSize), blockSize * sizeof(int), streams[i], device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(hipMemcpyAsync(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), hipMemcpyDeviceToHost, streams[i]), "device to host async memcpy"); break; default: break; } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); return elapsedTime; } /* Check for safe return of all calls to the device */ int main ( int argc, char **argv ) { // Take block size and data size as input // Check block size to make sure it does not exceed the maximum number of threads per block int blockSize = 512; int sizeOfArray = 1024 * 1024; if(argc == 3) { //numRuns = atoi(argv[1]); blockSize = atoi(argv[1]); sizeOfArray = atoi(argv[2]); } const int num_streams = 10; int *host_a, *host_b, *host_result[num_streams]; int *device_a[num_streams], *device_b[num_streams], *device_result[num_streams]; getCUDAInfo(); int gridSize = ((sizeOfArray % blockSize) == 0) ? (sizeOfArray / blockSize) : ((sizeOfArray / blockSize) + 1); // Create array to store each stream: hipStream_t streams[num_streams]; hipEvent_t start, stop; float elapsedTime0, elapsedTime1 = 0, elapsedTime2 = 0, elapsedTime3 = 0 ; hipEventCreate( &start ); hipEventCreate( &stop ); // Allocate host memory hipHostMalloc((void **)&host_a, sizeOfArray*sizeof(int), hipHostMallocDefault); hipHostMalloc((void **)&host_b, sizeOfArray*sizeof(int), hipHostMallocDefault); for(int i = 0; i < num_streams; i++) { hipHostMalloc((void **)&host_result[i], num_streams*sizeOfArray*sizeof(int), hipHostMallocDefault); } // Initiailize host memory for(int index = 0; index < sizeOfArray; index++) { host_a[index] = rand()%10; host_b[index] = rand()%10; } //hipEventRecord(start,0); elapsedTime0 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 0, gridSize, blockSize); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&elapsedTime0, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); //hipEventRecord(start, 0); elapsedTime1 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 1, gridSize, blockSize); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&elapsedTime1, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); //hipEventRecord(start, 0); elapsedTime2 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 2, gridSize, blockSize); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&elapsedTime2, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); //hipEventRecord(start, 0); elapsedTime3 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 3, gridSize, blockSize); //hipEventRecord(stop, 0); //hipEventSynchronize(stop); //hipEventElapsedTime(&elapsedTime3, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); /*hipStream_t stream; hipStreamCreate(&stream); */ //printf("*********** CDAC - Tech Workshop : hyPACK-2013 \n"); printf("\n Size of array : %d \n", sizeOfArray); printf("\n Time taken for synchronous memcpy and synchronous kernel launch: %f ms \n", elapsedTime0); printf("\n Time taken for 
asynchronous memcpy and synchronous kernel launch: %f ms \n", elapsedTime1); printf("\n Time taken for synchronous memcpy and asynchronous kernel launch: %f ms \n", elapsedTime2); printf("\n Time taken for asynchronous memcpy and asynchronous kernel launch: %f ms \n", elapsedTime3); for(int i = 0; i < num_streams; i++) { hipStreamDestroy(streams[i]); } hipEventDestroy(stop); hipEventDestroy(start); hipHostFree(host_a); hipHostFree(host_b); hipHostFree(host_result); hipFree(device_a); hipFree(device_b); hipFree(device_result); return 0; }
19e61d94890b45ed7932a22d81cdd1bde497814b.cu
/*https://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#hetr-cuda-prog-cuda-streams*/ #include <stdio.h> #include <time.h> #include <cuda.h> //#define sizeOfArray 1024*1024 // 1. Execute Everything synchronously // 2. Execute Everything asynchronously // 3. Execute Memcpy Synchronously and kernel launch Asynchronously // 4. Execute Memcpy asynchronously and kernel launch synchronously // ** Measure variability across the above by chaning data size ** // ** Utilize Multiple Streams like in the example: https://devblogs.nvidia.com/gpu-pro-tip-cuda-7-streams-simplify-concurrency/ ** // Put global variables here int REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, *MAX_THREADS_DIM; size_t TOTAL_GLOBAL_MEM, TOTAL_CONST_MEM; __global__ void arrayAddition(int *device_a, int *device_b, int *device_result, int sizeOfArray) { int threadId = threadIdx.x + blockIdx.x * blockDim.x ; if (threadId < sizeOfArray) device_result[threadId]= device_a[threadId]+device_b[threadId]; } /* *checkCuda: will check to see if there is an error returned by CUDA runtime */ inline void checkCuda(cudaError_t errMsg, const char* errContext) { if(errMsg != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error From %s: %s\n", errContext, cudaGetErrorString(errMsg)); exit(EXIT_FAILURE); } } /* * getCUDAInfo() - originally named "getHardwareContraints in module 3 * - this function will get CUDA information pertaining to the hardware * on which we are operating... the code can then reason on these reports to determine * the best way to structure memory transfers between the host and device */ void getCUDAInfo() { //=============================Gets number of cuda devices=========================================== int deviceCount = 0; checkCuda(cudaGetDeviceCount(&deviceCount), "Failed deviceCount load"); // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0) { printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); } //=============================Gets number of cuda devices=========================================== // for each device found, store this device in some type of object int device; for (device = 0; device < deviceCount; device++) { // Sets the context of the device so that we know which device we are working with if there // are multiple cudaSetDevice(device); cudaDeviceProp deviceProp; // gets the "properties" struct that stores the properties of a device // from this property struct, we can query the limitations of this device cudaGetDeviceProperties(&deviceProp, device); printf("\nDevice: %d \"%s\"\n===========================================\n", device, deviceProp.name); TOTAL_GLOBAL_MEM = deviceProp.totalGlobalMem; REGS_PER_BLOCK = deviceProp.regsPerBlock; WARP_SIZE = deviceProp.warpSize; MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock; MAX_THREADS_DIM = deviceProp.maxThreadsDim; TOTAL_CONST_MEM = deviceProp.totalConstMem; printf("The %s has:\n\t-%zu total bytes of global memory\n\t-%zu bytes of constant memory\n\t-%d registers per block\n\t-%d threads per warp\n\t-A maximum of %d threads per block\n\t-A maximum thread dimension of %d x %d x %d\n", deviceProp.name, TOTAL_GLOBAL_MEM,TOTAL_CONST_MEM, REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, MAX_THREADS_DIM[0], MAX_THREADS_DIM[1], MAX_THREADS_DIM[2]); // What I think we care about: // 1. totalGlobalMem // 2. regsPerBlock // 3. warpSize (i.e. numThreadsPerBlock (is this equal to regsPerBlock??) // 4. maxThreadsperBlock // 5. 
maxThreadsDim[3] } } bool verifyResult(int *host_a, int *host_b, int **host_result, int num_streams, int sizeOfArray) { bool testPassed = true; for(int i = 0; i < num_streams; i++) { for(int j = 0; j < sizeOfArray; j++) { if((host_a[j] + host_b[j]) != host_result[i][j]) { testPassed = false; printf("Case %d Failed on Stream %d, iteration %d: %d + %d = Serial Result: %d ParallelResult: %d\n", 0,i, j, host_a[j], host_b[j], host_a[j] + host_b[j], host_result[i][j]); } } } return testPassed; } float testStream(int **device_a, int **device_b, int **device_result, int *host_a, int *host_b, int **host_result, cudaStream_t *streams, cudaEvent_t start, cudaEvent_t stop, int num_streams, int sizeOfArray,int sync_test, int gridSize, int blockSize) { float elapsedTime = 0; for(int i = 0; i < num_streams; i++) { // Create new stream on each iteration checkCuda(cudaStreamCreate(&streams[i]), "cudaStreamCreate"); // Allocate device memory for each iteration cudaMalloc( ( void**)& device_a[i], sizeOfArray * sizeof ( **device_a ) ); cudaMalloc( ( void**)& device_b[i],sizeOfArray * sizeof ( **device_b ) ); cudaMalloc( ( void**)& device_result[i], sizeOfArray * sizeof ( **device_result ) ); cudaEventRecord(start,0); switch(sync_test) { // Synchronous memcpy and kernel launch case 0: checkCuda(cudaMemcpy(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 1"); checkCuda(cudaMemcpy(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 2"); arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int)>>>(device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(cudaMemcpy(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost), "device to host async memcpy"); break; // Asynchronous memcpy and synchronous kernel launch case 1: checkCuda(cudaMemcpyAsync(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 1"); checkCuda(cudaMemcpyAsync(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 2"); arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int)>>>(device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(cudaMemcpyAsync(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost, streams[i]), "device to host async memcpy"); break; // Synchronous memcpy and asynchronous kernel launch case 2: checkCuda(cudaMemcpy(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 1"); checkCuda(cudaMemcpy(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 2"); arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int), streams[i]>>>(device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(cudaMemcpy(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost), "device to host async memcpy"); break; case 3: checkCuda(cudaMemcpyAsync(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 1"); checkCuda(cudaMemcpyAsync(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 2"); arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int), streams[i]>>>(device_a[i], device_b[i], device_result[i], sizeOfArray); checkCuda(cudaMemcpyAsync(host_result[i], 
device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost, streams[i]), "device to host async memcpy"); break; default: break; } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); return elapsedTime; } /* Check for safe return of all calls to the device */ int main ( int argc, char **argv ) { // Take block size and data size as input // Check block size to make sure it does not exceed the maximum number of threads per block int blockSize = 512; int sizeOfArray = 1024 * 1024; if(argc == 3) { //numRuns = atoi(argv[1]); blockSize = atoi(argv[1]); sizeOfArray = atoi(argv[2]); } const int num_streams = 10; int *host_a, *host_b, *host_result[num_streams]; int *device_a[num_streams], *device_b[num_streams], *device_result[num_streams]; getCUDAInfo(); int gridSize = ((sizeOfArray % blockSize) == 0) ? (sizeOfArray / blockSize) : ((sizeOfArray / blockSize) + 1); // Create array to store each stream: cudaStream_t streams[num_streams]; cudaEvent_t start, stop; float elapsedTime0, elapsedTime1 = 0, elapsedTime2 = 0, elapsedTime3 = 0 ; cudaEventCreate( &start ); cudaEventCreate( &stop ); // Allocate host memory cudaHostAlloc((void **)&host_a, sizeOfArray*sizeof(int), cudaHostAllocDefault); cudaHostAlloc((void **)&host_b, sizeOfArray*sizeof(int), cudaHostAllocDefault); for(int i = 0; i < num_streams; i++) { cudaHostAlloc((void **)&host_result[i], num_streams*sizeOfArray*sizeof(int), cudaHostAllocDefault); } // Initiailize host memory for(int index = 0; index < sizeOfArray; index++) { host_a[index] = rand()%10; host_b[index] = rand()%10; } //cudaEventRecord(start,0); elapsedTime0 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 0, gridSize, blockSize); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&elapsedTime0, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); //cudaEventRecord(start, 0); elapsedTime1 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 1, gridSize, blockSize); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&elapsedTime1, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); //cudaEventRecord(start, 0); elapsedTime2 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 2, gridSize, blockSize); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&elapsedTime2, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); //cudaEventRecord(start, 0); elapsedTime3 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 3, gridSize, blockSize); //cudaEventRecord(stop, 0); //cudaEventSynchronize(stop); //cudaEventElapsedTime(&elapsedTime3, start, stop); verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray); /*cudaStream_t stream; cudaStreamCreate(&stream); */ //printf("*********** CDAC - Tech Workshop : hyPACK-2013 \n"); printf("\n Size of array : %d \n", sizeOfArray); printf("\n Time taken for synchronous memcpy and synchronous kernel launch: %f ms \n", elapsedTime0); printf("\n Time taken for asynchronous memcpy and synchronous kernel launch: %f ms \n", elapsedTime1); printf("\n Time taken for synchronous memcpy and asynchronous kernel launch: %f ms 
\n", elapsedTime2); printf("\n Time taken for asynchronous memcpy and asynchronous kernel launch: %f ms \n", elapsedTime3); for(int i = 0; i < num_streams; i++) { cudaStreamDestroy(streams[i]); } cudaEventDestroy(stop); cudaEventDestroy(start); cudaFreeHost(host_a); cudaFreeHost(host_b); cudaFreeHost(host_result); cudaFree(device_a); cudaFree(device_b); cudaFree(device_result); return 0; }
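The test above cycles through four sync/async combinations but always copies the full arrays in every stream; the devblogs post it links to gets real overlap by splitting one array into per-stream chunks. A hedged, minimal sketch of that chunked pattern (sizes and names are illustrative; pinned host memory from cudaHostAlloc is what allows cudaMemcpyAsync to be truly asynchronous):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void addOne(int *d, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) d[i] += 1;
}

int main() {
    const int nStreams = 4, chunk = 1 << 20, n = nStreams * chunk, threads = 256;
    int *h, *d;
    cudaHostAlloc((void **)&h, n * sizeof(int), cudaHostAllocDefault);  // pinned host buffer
    cudaMalloc((void **)&d, n * sizeof(int));
    for (int i = 0; i < n; i++) h[i] = i;

    cudaStream_t streams[nStreams];
    for (int s = 0; s < nStreams; s++) cudaStreamCreate(&streams[s]);

    for (int s = 0; s < nStreams; s++) {
        int off = s * chunk;
        // H2D copy, kernel and D2H copy go into the same stream, so they stay ordered
        // with each other but can overlap with the other streams' work.
        cudaMemcpyAsync(d + off, h + off, chunk * sizeof(int), cudaMemcpyHostToDevice, streams[s]);
        addOne<<<(chunk + threads - 1) / threads, threads, 0, streams[s]>>>(d + off, chunk);
        cudaMemcpyAsync(h + off, d + off, chunk * sizeof(int), cudaMemcpyDeviceToHost, streams[s]);
    }
    cudaDeviceSynchronize();
    printf("h[0]=%d h[%d]=%d\n", h[0], n - 1, h[n - 1]);   // expect 1 and n

    for (int s = 0; s < nStreams; s++) cudaStreamDestroy(streams[s]);
    cudaFreeHost(h);
    cudaFree(d);
    return 0;
}

The key differences from the timing harness above: the host buffer is pinned, each chunk's three operations share one stream, and no blocking cudaMemcpy sits between launches, so copies in one stream can overlap kernels in another on devices with copy engines.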
1d84201bc4c3acb643fd65b827c7b8b351c936fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches zlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zlacpy_batched_kernel( int m, int n, const magmaDoubleComplex * const *dAarray, int ldda, magmaDoubleComplex **dBarray, int lddb ) { // dA and dB iterate across row i const magmaDoubleComplex *dA = dAarray[ blockIdx.y ]; magmaDoubleComplex *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaDoubleComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = *dA; dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_zlacpy_batched( char uplo, magma_int_t m, magma_int_t n, const magmaDoubleComplex * const *dAarray, magma_int_t ldda, magmaDoubleComplex **dBarray, magma_int_t lddb, magma_int_t batchCount ) { /* Note ======== - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ======= ZLACPY copies all or part of a set of two-dimensional matrices dAarray[i] to another set of matrices dBarray[i], for i = 0, ..., batchCount-1. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of each matrix dAarray[i] to be copied to dBarray[i]. = 'U': Upper triangular part = 'L': Lower triangular part Otherwise: All of each matrix dAarray[i] M (input) INTEGER The number of rows of each matrix dAarray[i]. M >= 0. N (input) INTEGER The number of columns of each matrix dAarray[i]. N >= 0. dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. If UPLO = 'U', only the upper triangle or trapezoid is accessed; if UPLO = 'L', only the lower triangle or trapezoid is accessed. LDDA (input) INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). dBarray (output) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. On exit, matrix dBarray[i] = matrix dAarray[i] in the locations specified by UPLO. LDDB (input) INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). batchCount (input) INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. 
===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); if ( (uplo == 'U') || (uplo == 'u') ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( (uplo == 'L') || (uplo == 'l') ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { hipLaunchKernelGGL(( zlacpy_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, dAarray, ldda, dBarray, lddb ); } }
1d84201bc4c3acb643fd65b827c7b8b351c936fd.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches zlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zlacpy_batched_kernel( int m, int n, const magmaDoubleComplex * const *dAarray, int ldda, magmaDoubleComplex **dBarray, int lddb ) { // dA and dB iterate across row i const magmaDoubleComplex *dA = dAarray[ blockIdx.y ]; magmaDoubleComplex *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaDoubleComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = *dA; dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_zlacpy_batched( char uplo, magma_int_t m, magma_int_t n, const magmaDoubleComplex * const *dAarray, magma_int_t ldda, magmaDoubleComplex **dBarray, magma_int_t lddb, magma_int_t batchCount ) { /* Note ======== - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ======= ZLACPY copies all or part of a set of two-dimensional matrices dAarray[i] to another set of matrices dBarray[i], for i = 0, ..., batchCount-1. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of each matrix dAarray[i] to be copied to dBarray[i]. = 'U': Upper triangular part = 'L': Lower triangular part Otherwise: All of each matrix dAarray[i] M (input) INTEGER The number of rows of each matrix dAarray[i]. M >= 0. N (input) INTEGER The number of columns of each matrix dAarray[i]. N >= 0. dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. If UPLO = 'U', only the upper triangle or trapezoid is accessed; if UPLO = 'L', only the lower triangle or trapezoid is accessed. LDDA (input) INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). dBarray (output) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX DOUBLE PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. On exit, matrix dBarray[i] = matrix dAarray[i] in the locations specified by UPLO. LDDB (input) INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). batchCount (input) INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. 
===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); if ( (uplo == 'U') || (uplo == 'u') ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( (uplo == 'L') || (uplo == 'l') ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { zlacpy_batched_kernel<<< grid, threads, 0, magma_stream >>>( m, n, dAarray, ldda, dBarray, lddb ); } }
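magmablas_zlacpy_batched above maps blockIdx.y to the matrix index and blockIdx.x to NB-row block rows, and its docstring says the pointer arrays themselves live on the GPU. A self-contained, hedged sketch of the same launch shape with plain float matrices (magmaDoubleComplex, uplo handling and the MAGMA argument checking are deliberately left out):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define NB 64

__global__ void lacpy_batched(int m, int n, const float * const *dA, int ldda,
                              float **dB, int lddb) {
    const float *A = dA[blockIdx.y];        // blockIdx.y selects the matrix
    float *B = dB[blockIdx.y];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < m) {                            // rows past m in the last block row are masked off
        for (int j = 0; j < n; j++)         // each thread copies row i across all columns
            B[i + j * lddb] = A[i + j * ldda];
    }
}

int main() {
    const int m = 100, n = 8, batch = 3, ld = m;
    const size_t bytes = (size_t)batch * ld * n * sizeof(float);
    float *hA = (float *)malloc(bytes), *hB = (float *)malloc(bytes);
    for (int i = 0; i < batch * ld * n; i++) hA[i] = (float)i;

    float *dAflat, *dBflat, **dAarray, **dBarray;
    cudaMalloc((void **)&dAflat, bytes);
    cudaMalloc((void **)&dBflat, bytes);
    cudaMemcpy(dAflat, hA, bytes, cudaMemcpyHostToDevice);

    // The kernel expects device arrays of per-matrix device pointers,
    // so build them on the host and copy them over.
    float *hAptr[batch], *hBptr[batch];
    for (int b = 0; b < batch; b++) { hAptr[b] = dAflat + b * ld * n; hBptr[b] = dBflat + b * ld * n; }
    cudaMalloc((void **)&dAarray, batch * sizeof(float *));
    cudaMalloc((void **)&dBarray, batch * sizeof(float *));
    cudaMemcpy(dAarray, hAptr, batch * sizeof(float *), cudaMemcpyHostToDevice);
    cudaMemcpy(dBarray, hBptr, batch * sizeof(float *), cudaMemcpyHostToDevice);

    dim3 threads(NB), grid((m + NB - 1) / NB, batch);   // x: block rows, y: batch index
    lacpy_batched<<<grid, threads>>>(m, n, (const float * const *)dAarray, ld, dBarray, ld);

    cudaMemcpy(hB, dBflat, bytes, cudaMemcpyDeviceToHost);
    printf("hB[last] = %.1f (expected %.1f)\n", hB[batch * ld * n - 1], hA[batch * ld * n - 1]);

    free(hA); free(hB);
    cudaFree(dAflat); cudaFree(dBflat); cudaFree(dAarray); cudaFree(dBarray);
    return 0;
}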
1a893955471486a44ed8c9fe8a6182cb35f44da9.hip
// !!! This is a file automatically generated by hipify!!! // Esta aplicação somente será executada em máquina Windows #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define _USE_MATH_DEFINES #include <math.h> #include <Windows.h> // Kernel __global__ void kernel(unsigned char* src) { __shared__ float temp[16][16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; const float period = 128.0f; temp[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*M_PI/period) + 1.0f) * (sinf(y*2.0f*M_PI/period) + 1.0f) / 4.0f; // Comente esta linha para ver a imagem inicial __syncthreads(); src[offset*4] = 0; src[offset*4+1] = temp[15-threadIdx.x][15-threadIdx.y]; src[offset*4+2] = 0; src[offset*4+3] = 255; } extern "C" __declspec(dllexport) void GenerateBitmap(unsigned char* dst, int dimension) { int size = dimension * dimension * 4; // assume RGBA hipError_t status; // Aloca memória unsigned char* src; status = hipMalloc(&src, size); dim3 blocks(dimension/16, dimension/16); dim3 threads(16,16); hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(threads), 0, 0, src); hipMemcpy(dst, src, size, hipMemcpyDeviceToHost); hipFree(src); } BOOL APIENTRY DllMain( HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved ) { switch (ul_reason_for_call) { case DLL_PROCESS_ATTACH: case DLL_THREAD_ATTACH: case DLL_THREAD_DETACH: case DLL_PROCESS_DETACH: break; } return TRUE; }
1a893955471486a44ed8c9fe8a6182cb35f44da9.cu
// Esta aplicação somente será executada em máquina Windows #include "cuda_runtime.h" #include "device_launch_parameters.h" #define _USE_MATH_DEFINES #include <math.h> #include <Windows.h> // Kernel __global__ void kernel(unsigned char* src) { __shared__ float temp[16][16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; const float period = 128.0f; temp[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*M_PI/period) + 1.0f) * (sinf(y*2.0f*M_PI/period) + 1.0f) / 4.0f; // Comente esta linha para ver a imagem inicial __syncthreads(); src[offset*4] = 0; src[offset*4+1] = temp[15-threadIdx.x][15-threadIdx.y]; src[offset*4+2] = 0; src[offset*4+3] = 255; } extern "C" __declspec(dllexport) void GenerateBitmap(unsigned char* dst, int dimension) { int size = dimension * dimension * 4; // assume RGBA cudaError_t status; // Aloca memória unsigned char* src; status = cudaMalloc(&src, size); dim3 blocks(dimension/16, dimension/16); dim3 threads(16,16); kernel<<<blocks,threads>>>(src); cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost); cudaFree(src); } BOOL APIENTRY DllMain( HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved ) { switch (ul_reason_for_call) { case DLL_PROCESS_ATTACH: case DLL_THREAD_ATTACH: case DLL_THREAD_DETACH: case DLL_PROCESS_DETACH: break; } return TRUE; }
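GenerateBitmap's kernel writes a 16x16 shared-memory tile and then reads it back mirrored (temp[15-x][15-y]), which is why the __syncthreads() barrier matters: without it a thread may read a slot its neighbour has not written yet. A hedged, console-only sketch of that dependence without the Windows/DLL plumbing (names and sizes below are illustrative):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void mirror_tile(const unsigned char *in, unsigned char *out, int w) {
    __shared__ unsigned char tile[16][16];
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    tile[threadIdx.x][threadIdx.y] = in[x + y * w];
    __syncthreads();                       // every slot written before any mirrored read
    out[x + y * w] = tile[15 - threadIdx.x][15 - threadIdx.y];
}

int main() {
    const int w = 64, h = 64, n = w * h;
    unsigned char *in, *out;
    cudaMallocManaged((void **)&in, n);
    cudaMallocManaged((void **)&out, n);
    for (int i = 0; i < n; i++) in[i] = (unsigned char)(i & 0xff);
    dim3 blocks(w / 16, h / 16), threads(16, 16);
    mirror_tile<<<blocks, threads>>>(in, out, w);
    cudaDeviceSynchronize();
    printf("in[0]=%d out[0]=%d\n", in[0], out[0]);  // out[0] comes from the tile's far corner
    cudaFree(in); cudaFree(out);
    return 0;
}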
ex6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <sys/time.h> #include <hip/hip_runtime.h> // (*) include hiprand device library using namespace std; __global__ void setup_kernel(hiprandState_t *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; // (*) initialize hiprand generator } __global__ void walk(hiprandState_t *state, double *result) { extern __shared__ double smem[]; int gid = threadIdx.x + blockIdx.x * blockDim.x; int id = threadIdx.x; // (*) generate double precision uniform random numbers in [0,1] __syncthreads(); // reduction in shared memory for(int s=blockDim.x/2; s>0; s>>=1) { if (id < s) { smem[id] += smem[id + s]; } __syncthreads(); } // copy block result to result array if (id == 0) result[blockIdx.x] = smem[id]; } int main(int argc, char *argv[]) { // time variables time_t sTime = time(NULL); struct timeval tt1, tt2, tt3; int ms; double fms; // number of steps int n = 1048576; // runtime configuration parameters int nThreads = 1024; int nBlocks = n/nThreads; // data and result arrays double *devResult, *hostResult; // host memory allocation hostResult = (double*)calloc(nBlocks,sizeof(double)); // device memory allocation hipMalloc((void**)&devResult, nBlocks*sizeof(double)); hiprandState_t *devStates; // (*) allocate space for hiprand states on device hipDeviceSynchronize(); gettimeofday( &tt1, NULL ); // (*) run setup kernel hipDeviceSynchronize(); gettimeofday( &tt2, NULL ); // random walk kernel run hipLaunchKernelGGL(( walk) , dim3(nBlocks), dim3(nThreads), nThreads*sizeof(double) , 0, devStates, devResult); hipDeviceSynchronize(); gettimeofday( &tt3, NULL ); // time calculation ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = ((double)ms)/1000000.0; cout << "Initialization time = " << fms << endl; ms = (tt3.tv_sec - tt2.tv_sec); ms = ms * 1000000 + (tt3.tv_usec - tt2.tv_usec); fms = ((double)ms)/1000000.0; cout << "Random walk time = " << fms << endl; // transfer results from device hipMemcpy(hostResult, devResult, nBlocks*sizeof(double), hipMemcpyDeviceToHost); // summation of block results double total = 0.0; for (int i=0; i<nBlocks; i++) { total += hostResult[i]; } // screen output of result cout << "Total distance = " << setprecision(9) << total << " in " << n << " steps." << endl; cout << "Expected distance = " << n/2 << endl; // cleanup hipFree(devResult); hipFree(devStates); free(hostResult); }
ex6.cu
#include <iostream> #include <iomanip> #include <sys/time.h> #include <cuda.h> // (*) include curand device library using namespace std; __global__ void setup_kernel(curandState *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; // (*) initialize curand generator } __global__ void walk(curandState *state, double *result) { extern __shared__ double smem[]; int gid = threadIdx.x + blockIdx.x * blockDim.x; int id = threadIdx.x; // (*) generate double precision uniform random numbers in [0,1] __syncthreads(); // reduction in shared memory for(int s=blockDim.x/2; s>0; s>>=1) { if (id < s) { smem[id] += smem[id + s]; } __syncthreads(); } // copy block result to result array if (id == 0) result[blockIdx.x] = smem[id]; } int main(int argc, char *argv[]) { // time variables time_t sTime = time(NULL); struct timeval tt1, tt2, tt3; int ms; double fms; // number of steps int n = 1048576; // runtime configuration parameters int nThreads = 1024; int nBlocks = n/nThreads; // data and result arrays double *devResult, *hostResult; // host memory allocation hostResult = (double*)calloc(nBlocks,sizeof(double)); // device memory allocation cudaMalloc((void**)&devResult, nBlocks*sizeof(double)); curandState *devStates; // (*) allocate space for curand states on device cudaThreadSynchronize(); gettimeofday( &tt1, NULL ); // (*) run setup kernel cudaThreadSynchronize(); gettimeofday( &tt2, NULL ); // random walk kernel run walk <<< nBlocks, nThreads, nThreads*sizeof(double) >>> (devStates, devResult); cudaThreadSynchronize(); gettimeofday( &tt3, NULL ); // time calculation ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = ((double)ms)/1000000.0; cout << "Initialization time = " << fms << endl; ms = (tt3.tv_sec - tt2.tv_sec); ms = ms * 1000000 + (tt3.tv_usec - tt2.tv_usec); fms = ((double)ms)/1000000.0; cout << "Random walk time = " << fms << endl; // transfer results from device cudaMemcpy(hostResult, devResult, nBlocks*sizeof(double), cudaMemcpyDeviceToHost); // summation of block results double total = 0.0; for (int i=0; i<nBlocks; i++) { total += hostResult[i]; } // screen output of result cout << "Total distance = " << setprecision(9) << total << " in " << n << " steps." << endl; cout << "Expected distance = " << n/2 << endl; // cleanup cudaFree(devResult); cudaFree(devStates); free(hostResult); }
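ex6 leaves the (*) steps as blanks for the exercise. A hedged sketch of one common way to fill them in with the cuRAND device API follows: curand_init seeds one curandState per thread in setup_kernel, and walk draws one uniform double per thread before the shared-memory reduction. The launch sizes are trimmed relative to the skeleton, and this is one possible solution, not the official one:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <curand_kernel.h>               // (*) include curand device library

__global__ void setup_kernel(curandState *state) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    // (*) same seed, one subsequence per thread, zero offset
    curand_init(1234ULL, id, 0, &state[id]);
}

__global__ void walk(curandState *state, double *result) {
    extern __shared__ double smem[];
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    int id = threadIdx.x;
    // (*) one uniform step in (0,1] per thread
    smem[id] = curand_uniform_double(&state[gid]);
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // reduction in shared memory
        if (id < s) smem[id] += smem[id + s];
        __syncthreads();
    }
    if (id == 0) result[blockIdx.x] = smem[0];
}

int main() {
    const int n = 1 << 20, nThreads = 256, nBlocks = n / nThreads;
    double *devResult, *hostResult = (double *)calloc(nBlocks, sizeof(double));
    curandState *devStates;
    cudaMalloc((void **)&devResult, nBlocks * sizeof(double));
    cudaMalloc((void **)&devStates, n * sizeof(curandState));   // (*) space for curand states
    setup_kernel<<<nBlocks, nThreads>>>(devStates);             // (*) run setup kernel
    walk<<<nBlocks, nThreads, nThreads * sizeof(double)>>>(devStates, devResult);
    cudaMemcpy(hostResult, devResult, nBlocks * sizeof(double), cudaMemcpyDeviceToHost);
    double total = 0.0;
    for (int i = 0; i < nBlocks; i++) total += hostResult[i];
    printf("total = %f, expected about %d\n", total, n / 2);
    cudaFree(devResult); cudaFree(devStates); free(hostResult);
    return 0;
}

Giving every thread the same seed but its own subsequence is the pattern the cuRAND documentation suggests for statistically independent per-thread streams.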
08ca86ff1bbe9b3361751fc6378deac08ca27d93.hip
// !!! This is a file automatically generated by hipify!!! #include "cavs/backend/op_impl.h" #include "cavs/backend/cuda_common.h" #include "cavs/backend/cudaRTC_wrapper.h" #include <string> #include <set> #include <vector> #include <algorithm> namespace backend { using ::midend::Tensor; using std::string; using std::vector; using std::set; template <typename T> class FusedKernelOpImpl : public OpImpl { public: explicit FusedKernelOpImpl(const OpDef& def) : OpImpl(def), stream_(hipStreamDefault) { const string& kernel_name = GetSingleArg<string>(def, "KernelName"); const string& kernel_src = GetSingleArg<string>(def, "KernelSource"); wrapper_.Compile(kernel_name, kernel_src); } void Compute(OpContext* context) override; private: RTC::CudaRTCWrapper wrapper_; hipStream_t stream_; }; template <typename T> void FusedKernelOpImpl<T>::Compute(OpContext* context) { vector<void*> outputs; vector<void*> inputs; vector<int> outputs_size; vector<int> inputs_size; set<int> size_conf; /*const int num_elements = context->Input(0).count();*/ for (int i = 0; i < context->OutputSize(); i++) { outputs.push_back((void*)(context->Output(i)->mutable_data<T>())); int count = context->Output(i)->count(); outputs_size.push_back(count); size_conf.insert(count); /*CHECK(context->Output(i)->count() == num_elements);*/ } for (int i = 0; i < context->InputSize(); i++) { inputs.push_back((void*)context->Input(i).data<T>()); int count = context->Input(i).count(); inputs_size.push_back(count); size_conf.insert(count); /*CHECK(context->Input(i).count() == num_elements);*/ } CHECK(size_conf.size() <= 2); const int num_elements = *(size_conf.rbegin()); if (!stream_ && context->GetStreamID() != -1) { stream_ = StreamEventHandlePool::GetCudaStream(context->GetStreamID()); VLOG(V_DEBUG) << "[Unary] Assign new stream with ID " << context->GetStreamID(); } wrapper_.Launch(outputs, inputs, outputs_size, inputs_size, num_elements, BLOCKS_PER_GRID(num_elements), 1, 1, THREADS_PER_BLOCK, 1, 1, stream_); for (int i = 0; i < context->InputSize(); i++) { context->Input(i).DebugNumerical<T>(); } for (int i = 0; i < context->OutputSize(); i++) { context->Output(i)->DebugNumerical<T>(); } } REGISTER_OP_IMPL_BUILDER(Key("FusedKernel").Device("GPU"), FusedKernelOpImpl<float>); } //namespace backend
08ca86ff1bbe9b3361751fc6378deac08ca27d93.cu
#include "cavs/backend/op_impl.h" #include "cavs/backend/cuda_common.h" #include "cavs/backend/cudaRTC_wrapper.h" #include <string> #include <set> #include <vector> #include <algorithm> namespace backend { using ::midend::Tensor; using std::string; using std::vector; using std::set; template <typename T> class FusedKernelOpImpl : public OpImpl { public: explicit FusedKernelOpImpl(const OpDef& def) : OpImpl(def), stream_(cudaStreamDefault) { const string& kernel_name = GetSingleArg<string>(def, "KernelName"); const string& kernel_src = GetSingleArg<string>(def, "KernelSource"); wrapper_.Compile(kernel_name, kernel_src); } void Compute(OpContext* context) override; private: RTC::CudaRTCWrapper wrapper_; cudaStream_t stream_; }; template <typename T> void FusedKernelOpImpl<T>::Compute(OpContext* context) { vector<void*> outputs; vector<void*> inputs; vector<int> outputs_size; vector<int> inputs_size; set<int> size_conf; /*const int num_elements = context->Input(0).count();*/ for (int i = 0; i < context->OutputSize(); i++) { outputs.push_back((void*)(context->Output(i)->mutable_data<T>())); int count = context->Output(i)->count(); outputs_size.push_back(count); size_conf.insert(count); /*CHECK(context->Output(i)->count() == num_elements);*/ } for (int i = 0; i < context->InputSize(); i++) { inputs.push_back((void*)context->Input(i).data<T>()); int count = context->Input(i).count(); inputs_size.push_back(count); size_conf.insert(count); /*CHECK(context->Input(i).count() == num_elements);*/ } CHECK(size_conf.size() <= 2); const int num_elements = *(size_conf.rbegin()); if (!stream_ && context->GetStreamID() != -1) { stream_ = StreamEventHandlePool::GetCudaStream(context->GetStreamID()); VLOG(V_DEBUG) << "[Unary] Assign new stream with ID " << context->GetStreamID(); } wrapper_.Launch(outputs, inputs, outputs_size, inputs_size, num_elements, BLOCKS_PER_GRID(num_elements), 1, 1, THREADS_PER_BLOCK, 1, 1, stream_); for (int i = 0; i < context->InputSize(); i++) { context->Input(i).DebugNumerical<T>(); } for (int i = 0; i < context->OutputSize(); i++) { context->Output(i)->DebugNumerical<T>(); } } REGISTER_OP_IMPL_BUILDER(Key("FusedKernel").Device("GPU"), FusedKernelOpImpl<float>); } //namespace backend
da1a3f745d822e7cd6100f5346331d9e1808c3c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> #include <iostream> #include "helpers.cuh" #include "encryption.cuh" // Host function. void encrypt_cpu(uint64_t * data, uint64_t num_entries, uint64_t num_iters, bool parallel=true) { // Use OpenMP to use all available CPU cores. #pragma omp parallel for if (parallel) for (uint64_t entry = 0; entry < num_entries; entry++) // Permute each data entry the number of iterations and then write result to data. data[entry] = permute64(entry, num_iters); } // Device function. __global__ void decrypt_gpu(uint64_t * data, uint64_t num_entries, uint64_t num_iters) { const uint64_t thrdID = blockIdx.x*blockDim.x+threadIdx.x; const uint64_t stride = blockDim.x*gridDim.x; //printf(" checkpoint i0\n"); // Utilize grid-stride loop for arbitrary data sizes. for (uint64_t entry = thrdID; entry < num_entries; entry += stride) // Unpermute each data entry the number of iterations then write result to data. data[entry] = unpermute64(data[entry], num_iters); //printf(" checkpoint i1\n"); } // Host function. bool check_result_cpu(uint64_t * data, uint64_t num_entries, bool parallel=true) { uint64_t counter = 0; #pragma omp parallel for reduction(+: counter) if (parallel) for (uint64_t entry = 0; entry < num_entries; entry++) // Because we created initial data values by ranging from 0 to N-1, // and because encrypting and decrypting is symmetrical, // then each data entry should be equal to `entry`. counter += data[entry] == entry; // True if all values have been correctly decrypted. return counter == num_entries; } int main (int argc, char * argv[]) { // This file will be used to cache encryption results // so we don't have to wait on the CPU every time. //const char * encrypted_file = "/dli/task/encrypted"; const char * encrypted_file = "hello"; //"/home/babak/Codes/Learning/HPC/2_Cuda/6_Accelerating_CUDA_C++_Applications_with_Concurrent_Streams/3_Application/hello"; // Timer instance to be used for sections of the application. Timer timer; // Timer instance to be used for total time on the GPU(s). Timer overall; const uint64_t num_entries = 1UL << 26; const uint64_t num_iters = 1UL << 10; // Use all available CPUs in parallel for host calculations. //const bool openmp = true; const bool openmp = false; // This timer start and then stop pattern will be used throughout the application. timer.start(); uint64_t * data_cpu, * data_gpu; // hipHostMalloc will be discussed at length later in the course. hipHostMalloc(&data_cpu, sizeof(uint64_t)*num_entries); hipMalloc (&data_gpu, sizeof(uint64_t)*num_entries); timer.stop("allocate memory"); check_last_error(); timer.start(); // If encryption cache file does not exist... if (!encrypted_file_exists(encrypted_file)) { // ...encrypt data in parallel on CPU... std::cout << " encrypting... \n "; encrypt_cpu(data_cpu, num_entries, num_iters, openmp); // ...and make encryption cache file for later. write_encrypted_to_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries); } else { std::cout << " reading... \n "; // Use encryption cache file if it exists. read_encrypted_from_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries); } timer.stop("encrypt data on CPU"); // Begin timing for total time on GPU(s). overall.start(); timer.start(); // Data copy from CPU to GPU. hipMemcpy(data_gpu, data_cpu, sizeof(uint64_t)*num_entries, hipMemcpyHostToDevice); timer.stop("copy data from CPU to GPU"); check_last_error(); timer.start(); // Decrypt data on GPU(s). 
hipLaunchKernelGGL(( decrypt_gpu), dim3(80*32), dim3(64), 0, 0, data_gpu, num_entries, num_iters); timer.stop("decrypt data on GPU"); //std::cout << " checkpoint 0\n"; check_last_error(); //std::cout << " checkpoint 1\n"; timer.start(); // Copy data from GPU to CPU. hipMemcpy(data_cpu, data_gpu, sizeof(uint64_t)*num_entries, hipMemcpyDeviceToHost); timer.stop("copy data from GPU to CPU"); // Stop timer for total time on GPU(s). overall.stop("total time on GPU"); check_last_error(); timer.start(); // Check results on CPU. const bool success = check_result_cpu(data_cpu, num_entries, openmp); std::cout << "STATUS: test " << ( success ? "passed" : "failed") << std::endl; timer.stop("checking result on CPU"); timer.start(); // Free memory. hipHostFree(data_cpu); hipFree (data_gpu); timer.stop("free memory"); check_last_error(); }
da1a3f745d822e7cd6100f5346331d9e1808c3c8.cu
#include <cstdint> #include <iostream> #include "helpers.cuh" #include "encryption.cuh" // Host function. void encrypt_cpu(uint64_t * data, uint64_t num_entries, uint64_t num_iters, bool parallel=true) { // Use OpenMP to use all available CPU cores. #pragma omp parallel for if (parallel) for (uint64_t entry = 0; entry < num_entries; entry++) // Permute each data entry the number of iterations and then write result to data. data[entry] = permute64(entry, num_iters); } // Device function. __global__ void decrypt_gpu(uint64_t * data, uint64_t num_entries, uint64_t num_iters) { const uint64_t thrdID = blockIdx.x*blockDim.x+threadIdx.x; const uint64_t stride = blockDim.x*gridDim.x; //printf(" checkpoint i0\n"); // Utilize grid-stride loop for arbitrary data sizes. for (uint64_t entry = thrdID; entry < num_entries; entry += stride) // Unpermute each data entry the number of iterations then write result to data. data[entry] = unpermute64(data[entry], num_iters); //printf(" checkpoint i1\n"); } // Host function. bool check_result_cpu(uint64_t * data, uint64_t num_entries, bool parallel=true) { uint64_t counter = 0; #pragma omp parallel for reduction(+: counter) if (parallel) for (uint64_t entry = 0; entry < num_entries; entry++) // Because we created initial data values by ranging from 0 to N-1, // and because encrypting and decrypting is symmetrical, // then each data entry should be equal to `entry`. counter += data[entry] == entry; // True if all values have been correctly decrypted. return counter == num_entries; } int main (int argc, char * argv[]) { // This file will be used to cache encryption results // so we don't have to wait on the CPU every time. //const char * encrypted_file = "/dli/task/encrypted"; const char * encrypted_file = "hello"; //"/home/babak/Codes/Learning/HPC/2_Cuda/6_Accelerating_CUDA_C++_Applications_with_Concurrent_Streams/3_Application/hello"; // Timer instance to be used for sections of the application. Timer timer; // Timer instance to be used for total time on the GPU(s). Timer overall; const uint64_t num_entries = 1UL << 26; const uint64_t num_iters = 1UL << 10; // Use all available CPUs in parallel for host calculations. //const bool openmp = true; const bool openmp = false; // This timer start and then stop pattern will be used throughout the application. timer.start(); uint64_t * data_cpu, * data_gpu; // cudaMallocHost will be discussed at length later in the course. cudaMallocHost(&data_cpu, sizeof(uint64_t)*num_entries); cudaMalloc (&data_gpu, sizeof(uint64_t)*num_entries); timer.stop("allocate memory"); check_last_error(); timer.start(); // If encryption cache file does not exist... if (!encrypted_file_exists(encrypted_file)) { // ...encrypt data in parallel on CPU... std::cout << " encrypting... \n "; encrypt_cpu(data_cpu, num_entries, num_iters, openmp); // ...and make encryption cache file for later. write_encrypted_to_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries); } else { std::cout << " reading... \n "; // Use encryption cache file if it exists. read_encrypted_from_file(encrypted_file, data_cpu, sizeof(uint64_t)*num_entries); } timer.stop("encrypt data on CPU"); // Begin timing for total time on GPU(s). overall.start(); timer.start(); // Data copy from CPU to GPU. cudaMemcpy(data_gpu, data_cpu, sizeof(uint64_t)*num_entries, cudaMemcpyHostToDevice); timer.stop("copy data from CPU to GPU"); check_last_error(); timer.start(); // Decrypt data on GPU(s). 
decrypt_gpu<<<80*32, 64>>>(data_gpu, num_entries, num_iters); timer.stop("decrypt data on GPU"); //std::cout << " checkpoint 0\n"; check_last_error(); //std::cout << " checkpoint 1\n"; timer.start(); // Copy data from GPU to CPU. cudaMemcpy(data_cpu, data_gpu, sizeof(uint64_t)*num_entries, cudaMemcpyDeviceToHost); timer.stop("copy data from GPU to CPU"); // Stop timer for total time on GPU(s). overall.stop("total time on GPU"); check_last_error(); timer.start(); // Check results on CPU. const bool success = check_result_cpu(data_cpu, num_entries, openmp); std::cout << "STATUS: test " << ( success ? "passed" : "failed") << std::endl; timer.stop("checking result on CPU"); timer.start(); // Free memory. cudaFreeHost(data_cpu); cudaFree (data_gpu); timer.stop("free memory"); check_last_error(); }
daca3de2717d7351869306dc0b001385549a0092.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef PADDLE_WITH_HIP #include <hipcub/hipcub.hpp> namespace cub = hipcub; #else #include <hipcub/hipcub.hpp> #endif #include <vector> #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/margin_cross_entropy_op.h" #include "paddle/fluid/operators/math/softmax_impl.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.h" #include "paddle/fluid/string/string_helper.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/kernels/funcs/axis_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/ProcessGroup.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } void GetClassInterval(const gpuStream_t& stream, const platform::Place& place, const platform::DeviceContext& ctx, const int rid, const int rank, const int nranks, const int D, Tensor* class_interval) { std::vector<int> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = D; if (nranks <= 1) { framework::TensorFromVector(shard_dim_vec, ctx, class_interval); return; } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) Tensor num_classes_per_device; framework::TensorFromVector(shard_dim_vec, ctx, &num_classes_per_device); int* num_classes_per_device_ptr = num_classes_per_device.data<int>(); auto map = distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup distributed::ProcessGroup* pg = map->get(rid); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(num_classes_per_device); out_tensor.push_back(num_classes_per_device); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream const auto calcu_stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(place)) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(num_classes_per_device.dtype())), ncclSum, comm->comm(), calcu_stream)); } auto class_interval_ptr = class_interval->mutable_data<int>({nranks + 1}, place); size_t cub_temp_storage_bytes = 0; 
hipcub::DeviceScan::InclusiveSum<int*, int*>( nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream); auto cub_temp_storage = memory::Alloc(place, cub_temp_storage_bytes); hipcub::DeviceScan::InclusiveSum<int*, int*>( cub_temp_storage->ptr(), cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, stream); return; #endif } template <typename T, typename IndexT> __global__ void AddMarginToPositiveLogitsKernel( T* logit, const IndexT* label, const float margin1, const float margin2, const float margin3, const int rank, const int nranks, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename details::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; int end_index = class_interval_ptr[rank + 1]; int num_classes = class_interval_ptr[nranks]; CUDA_KERNEL_LOOP(i, N) { auto real_label = label[i]; PADDLE_ENFORCE((real_label < num_classes) && (real_label >= 0), "The index is out of bounds, " "please check whether the value of label and " "input meet the number of class. It should " "be less than [%d], but received [%d]", num_classes, real_label); if (real_label >= start_index && real_label < end_index) { int64_t offset = i * D + real_label - start_index; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) { MPType x = static_cast<MPType>(logit[offset]); MPType theta = acos(x); if (fabs(margin1 - 1.0) > 1e-8) { theta *= static_cast<MPType>(margin1); } if (fabs(margin2) > 1e-8) { theta += static_cast<MPType>(margin2); } logit[offset] = static_cast<T>(cos(theta)); } if (fabs(margin3) > 1e-8) { MPType y = static_cast<MPType>(logit[offset]); y -= static_cast<MPType>(margin3); logit[offset] = static_cast<T>(y); } } } } template <typename T> __global__ void ScaleLogitKernel(T* logits, const float scale, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { logits[i] *= static_cast<T>(scale); } } template <typename T> __global__ void LogitsMinusMaxKernel(T* logits, const T* logits_max_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= logits_max_per_row[row]; } } template <typename T> __global__ void LogitsMinusLogSumKernel(T* logits, const T* logits_sum_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= kps::details::Log(logits_sum_per_row[row]); } } template <typename T, typename IndexT> __global__ void HardLabelSoftmaxWithCrossEntropyKernel( T* loss, T* log_softmax, const IndexT* labels, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { auto softmax = log_softmax[i]; loss[row] = -softmax; log_softmax[i] = kps::details::Exp(softmax); } else { log_softmax[i] = kps::details::Exp(log_softmax[i]); } } } template <typename T, typename IndexT> __global__ void CalculateGrad(T* logits_grad, const T* loss_grad, const T* logits, const IndexT* labels, const float margin1, const float margin2, const float scale, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename details::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row]; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 
1e-8) { MPType dout = static_cast<MPType>(logits_grad[i]); MPType one = static_cast<MPType>(1.0f); MPType x = static_cast<MPType>(logits[i]); MPType m1 = static_cast<MPType>(margin1); MPType m2 = static_cast<MPType>(margin2); MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x); logits_grad[i] = static_cast<T>(dout * d); } } else { logits_grad[i] *= loss_grad[row]; } if (fabs(scale - 1.0) > 1e-8) { logits_grad[i] *= static_cast<T>(scale); } } } template <typename T> class MarginCrossEntropyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* logits = ctx.Input<Tensor>("Logits"); const Tensor* labels = ctx.Input<Tensor>("Label"); Tensor* softmax = ctx.Output<Tensor>("Softmax"); Tensor* loss = ctx.Output<Tensor>("Loss"); const int rid = ctx.Attr<int>("ring_id"); const int nranks = ctx.Attr<int>("nranks"); const int rank = ctx.Attr<int>("rank"); const float margin1 = ctx.Attr<float>("margin1"); const float margin2 = ctx.Attr<float>("margin2"); const float margin3 = ctx.Attr<float>("margin3"); const float scale = ctx.Attr<float>("scale"); const auto& place = ctx.GetPlace(); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) platform::NCCLComm* comm; distributed::ProcessGroup* pg = nullptr; gpuStream_t stream; if (nranks > 1) { auto map = distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup pg = map->get(rid); } else { comm = platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(place)) ->stream(); } } #endif // allocate memory on device. T* softmax_ptr = softmax->mutable_data<T>(place); T* loss_ptr = loss->mutable_data<T>(place); const auto& logits_dims = logits->dims(); const auto& labels_dims = labels->dims(); const int axis = logits_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, logits_dims); const int D = phi::funcs::SizeFromAxis(axis, logits_dims); int blocks = NumBlocks(N); int threads = kNumCUDAThreads; const auto& label_type = framework::TransToProtoVarType(labels->dtype()); // copy logits to softmax variable since we can't modify logits, // and it also be used when calculate grad framework::TensorCopy(*logits, ctx.GetPlace(), ctx.device_context(), softmax); Tensor softmax_2d; softmax_2d.ShareDataWith(*softmax).Resize({N, D}); T* logits_ptr = softmax_2d.data<T>(); Tensor class_interval; GetClassInterval(dev_ctx.stream(), place, ctx.cuda_device_context(), rid, rank, nranks, D, &class_interval); // step 1, preprocess logits // add margin for positive elements // theta = acos(x_i) // (cos(m1 * theta + m2) - m3) // save match_logits, used for gradient computation. 
if (label_type == framework::proto::VarType::INT32) { typedef int32_t LabelT; hipLaunchKernelGGL(( AddMarginToPositiveLogitsKernel< T>), dim3(NumBlocks(N)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, labels->data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else if (label_type == framework::proto::VarType::INT64) { typedef int64_t LabelT; hipLaunchKernelGGL(( AddMarginToPositiveLogitsKernel< T>), dim3(NumBlocks(N)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, labels->data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else { PADDLE_THROW(platform::errors::Unimplemented( "margin_cross_entropy label type noly support int32 and int64, " "but got %s", label_type)); } // scale by s hipLaunchKernelGGL(( ScaleLogitKernel<T>), dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, scale, N, D); // step 2, obtain logit_max Tensor logits_max; logits_max = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); T* logits_max_buff = logits_max.mutable_data<T>(place); TensorReduceImpl<T, T, kps::MaxFunctor, kps::IdentityFunctor<T>>( dev_ctx, softmax_2d, &logits_max, kps::IdentityFunctor<T>(), {1}, dev_ctx.stream()); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(logits_max); out_tensor.push_back(logits_max); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::MAX; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(logits_max.dtype())), ncclMax, comm->comm(), stream)); } } #endif // step 3, logit - logit_max hipLaunchKernelGGL(( LogitsMinusMaxKernel<T>), dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, logits_max_buff, N, D); // step 4, sum(exp(logit - logit_max)) Tensor sum_exp_logits; sum_exp_logits = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); T* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place); TensorReduceImpl<T, T, kps::AddFunctor, kps::ExpFunctor<T>>( dev_ctx, softmax_2d, &sum_exp_logits, kps::ExpFunctor<T>(), {1}, dev_ctx.stream()); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(sum_exp_logits); out_tensor.push_back(sum_exp_logits); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(sum_exp_logits.dtype())), ncclSum, comm->comm(), stream)); } } #endif // step 5, (logit - logit_max) - log(sum(exp(logit - logit_max))) hipLaunchKernelGGL(( LogitsMinusLogSumKernel< T>), dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, sum_exp_logits_buff, N, D); // step 6, prob = exp((logit - logit_max) - log(sum(exp(logit - // logit_max)))) // loss = -((logit_i - logit_max) - log(sum(exp(logit - logit_max)))) phi::funcs::SetConstant<platform::CUDADeviceContext, T>()( dev_ctx, loss, static_cast<T>(0.0)); if (label_type 
== framework::proto::VarType::INT32) { typedef int32_t LabelT; hipLaunchKernelGGL(( HardLabelSoftmaxWithCrossEntropyKernel< T, LabelT>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_ptr, logits_ptr, labels->data<LabelT>(), rank, N, D, class_interval.data<int>()); } else if (label_type == framework::proto::VarType::INT64) { typedef int64_t LabelT; hipLaunchKernelGGL(( HardLabelSoftmaxWithCrossEntropyKernel< T, LabelT>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_ptr, logits_ptr, labels->data<LabelT>(), rank, N, D, class_interval.data<int>()); } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(*loss); out_tensor.push_back(*loss); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( loss_ptr, loss_ptr, loss->numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(loss->dtype())), ncclSum, comm->comm(), stream)); } } #endif } }; template <typename T> class MarginCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* labels = context.Input<Tensor>("Label"); const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* softmax = context.Input<Tensor>("Softmax"); const Tensor* loss_grad = context.Input<Tensor>(framework::GradVarName("Loss")); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const bool return_softmax = context.Attr<bool>("return_softmax"); const int rid = context.Attr<int>("ring_id"); const int nranks = context.Attr<int>("nranks"); const int rank = context.Attr<int>("rank"); const float margin1 = context.Attr<float>("margin1"); const float margin2 = context.Attr<float>("margin2"); const float margin3 = context.Attr<float>("margin3"); const float scale = context.Attr<float>("scale"); auto& dev_ctx = context.template device_context<platform::CUDADeviceContext>(); const auto sofrmax_dims = softmax->dims(); const int axis = sofrmax_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, sofrmax_dims); const int D = phi::funcs::SizeFromAxis(axis, sofrmax_dims); if (return_softmax) { framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), logit_grad); } else { logit_grad->ShareDataWith(*softmax); } int blocks = NumBlocks(N * D); int threads = kNumCUDAThreads; const auto& label_type = framework::TransToProtoVarType(labels->dtype()); Tensor class_interval; GetClassInterval(dev_ctx.stream(), context.GetPlace(), context.cuda_device_context(), rid, rank, nranks, D, &class_interval); if (label_type == framework::proto::VarType::INT32) { typedef int32_t LabelT; hipLaunchKernelGGL(( CalculateGrad<T, LabelT>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logit_grad->data<T>(), loss_grad->data<T>(), logits->data<T>(), labels->data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } else if (label_type == framework::proto::VarType::INT64) { typedef int64_t LabelT; hipLaunchKernelGGL(( CalculateGrad<T, LabelT>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logit_grad->data<T>(), loss_grad->data<T>(), logits->data<T>(), labels->data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } } }; } // namespace operators } // namespace paddle 
namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(margin_cross_entropy, ops::MarginCrossEntropyOpCUDAKernel<float>, ops::MarginCrossEntropyOpCUDAKernel<double>, ops::MarginCrossEntropyOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(margin_cross_entropy_grad, ops::MarginCrossEntropyGradCUDAKernel<float>, ops::MarginCrossEntropyGradCUDAKernel<double>, ops::MarginCrossEntropyGradCUDAKernel<plat::float16>);
daca3de2717d7351869306dc0b001385549a0092.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef PADDLE_WITH_HIP #include <hipcub/hipcub.hpp> namespace cub = hipcub; #else #include <cub/cub.cuh> #endif #include <vector> #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/margin_cross_entropy_op.h" #include "paddle/fluid/operators/math/softmax_impl.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.h" #include "paddle/fluid/string/string_helper.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/kernels/funcs/axis_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/ProcessGroup.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } void GetClassInterval(const gpuStream_t& stream, const platform::Place& place, const platform::DeviceContext& ctx, const int rid, const int rank, const int nranks, const int D, Tensor* class_interval) { std::vector<int> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = D; if (nranks <= 1) { framework::TensorFromVector(shard_dim_vec, ctx, class_interval); return; } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) Tensor num_classes_per_device; framework::TensorFromVector(shard_dim_vec, ctx, &num_classes_per_device); int* num_classes_per_device_ptr = num_classes_per_device.data<int>(); auto map = distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup distributed::ProcessGroup* pg = map->get(rid); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(num_classes_per_device); out_tensor.push_back(num_classes_per_device); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream const auto calcu_stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(place)) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(num_classes_per_device.dtype())), ncclSum, comm->comm(), calcu_stream)); } auto class_interval_ptr = class_interval->mutable_data<int>({nranks + 1}, place); size_t cub_temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum<int*, int*>( nullptr, cub_temp_storage_bytes, nullptr, nullptr, 
nranks + 1, stream); auto cub_temp_storage = memory::Alloc(place, cub_temp_storage_bytes); cub::DeviceScan::InclusiveSum<int*, int*>( cub_temp_storage->ptr(), cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, stream); return; #endif } template <typename T, typename IndexT> __global__ void AddMarginToPositiveLogitsKernel( T* logit, const IndexT* label, const float margin1, const float margin2, const float margin3, const int rank, const int nranks, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename details::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; int end_index = class_interval_ptr[rank + 1]; int num_classes = class_interval_ptr[nranks]; CUDA_KERNEL_LOOP(i, N) { auto real_label = label[i]; PADDLE_ENFORCE((real_label < num_classes) && (real_label >= 0), "The index is out of bounds, " "please check whether the value of label and " "input meet the number of class. It should " "be less than [%d], but received [%d]", num_classes, real_label); if (real_label >= start_index && real_label < end_index) { int64_t offset = i * D + real_label - start_index; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) { MPType x = static_cast<MPType>(logit[offset]); MPType theta = acos(x); if (fabs(margin1 - 1.0) > 1e-8) { theta *= static_cast<MPType>(margin1); } if (fabs(margin2) > 1e-8) { theta += static_cast<MPType>(margin2); } logit[offset] = static_cast<T>(cos(theta)); } if (fabs(margin3) > 1e-8) { MPType y = static_cast<MPType>(logit[offset]); y -= static_cast<MPType>(margin3); logit[offset] = static_cast<T>(y); } } } } template <typename T> __global__ void ScaleLogitKernel(T* logits, const float scale, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { logits[i] *= static_cast<T>(scale); } } template <typename T> __global__ void LogitsMinusMaxKernel(T* logits, const T* logits_max_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= logits_max_per_row[row]; } } template <typename T> __global__ void LogitsMinusLogSumKernel(T* logits, const T* logits_sum_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= kps::details::Log(logits_sum_per_row[row]); } } template <typename T, typename IndexT> __global__ void HardLabelSoftmaxWithCrossEntropyKernel( T* loss, T* log_softmax, const IndexT* labels, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { auto softmax = log_softmax[i]; loss[row] = -softmax; log_softmax[i] = kps::details::Exp(softmax); } else { log_softmax[i] = kps::details::Exp(log_softmax[i]); } } } template <typename T, typename IndexT> __global__ void CalculateGrad(T* logits_grad, const T* loss_grad, const T* logits, const IndexT* labels, const float margin1, const float margin2, const float scale, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename details::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row]; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) { MPType dout = static_cast<MPType>(logits_grad[i]); MPType one = static_cast<MPType>(1.0f); 
MPType x = static_cast<MPType>(logits[i]); MPType m1 = static_cast<MPType>(margin1); MPType m2 = static_cast<MPType>(margin2); MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x); logits_grad[i] = static_cast<T>(dout * d); } } else { logits_grad[i] *= loss_grad[row]; } if (fabs(scale - 1.0) > 1e-8) { logits_grad[i] *= static_cast<T>(scale); } } } template <typename T> class MarginCrossEntropyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* logits = ctx.Input<Tensor>("Logits"); const Tensor* labels = ctx.Input<Tensor>("Label"); Tensor* softmax = ctx.Output<Tensor>("Softmax"); Tensor* loss = ctx.Output<Tensor>("Loss"); const int rid = ctx.Attr<int>("ring_id"); const int nranks = ctx.Attr<int>("nranks"); const int rank = ctx.Attr<int>("rank"); const float margin1 = ctx.Attr<float>("margin1"); const float margin2 = ctx.Attr<float>("margin2"); const float margin3 = ctx.Attr<float>("margin3"); const float scale = ctx.Attr<float>("scale"); const auto& place = ctx.GetPlace(); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) platform::NCCLComm* comm; distributed::ProcessGroup* pg = nullptr; gpuStream_t stream; if (nranks > 1) { auto map = distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup pg = map->get(rid); } else { comm = platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(place)) ->stream(); } } #endif // allocate memory on device. T* softmax_ptr = softmax->mutable_data<T>(place); T* loss_ptr = loss->mutable_data<T>(place); const auto& logits_dims = logits->dims(); const auto& labels_dims = labels->dims(); const int axis = logits_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, logits_dims); const int D = phi::funcs::SizeFromAxis(axis, logits_dims); int blocks = NumBlocks(N); int threads = kNumCUDAThreads; const auto& label_type = framework::TransToProtoVarType(labels->dtype()); // copy logits to softmax variable since we can't modify logits, // and it also be used when calculate grad framework::TensorCopy(*logits, ctx.GetPlace(), ctx.device_context(), softmax); Tensor softmax_2d; softmax_2d.ShareDataWith(*softmax).Resize({N, D}); T* logits_ptr = softmax_2d.data<T>(); Tensor class_interval; GetClassInterval(dev_ctx.stream(), place, ctx.cuda_device_context(), rid, rank, nranks, D, &class_interval); // step 1, preprocess logits // add margin for positive elements // theta = acos(x_i) // (cos(m1 * theta + m2) - m3) // save match_logits, used for gradient computation. 
if (label_type == framework::proto::VarType::INT32) { typedef int32_t LabelT; AddMarginToPositiveLogitsKernel< T><<<NumBlocks(N), threads, 0, dev_ctx.stream()>>>( logits_ptr, labels->data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else if (label_type == framework::proto::VarType::INT64) { typedef int64_t LabelT; AddMarginToPositiveLogitsKernel< T><<<NumBlocks(N), threads, 0, dev_ctx.stream()>>>( logits_ptr, labels->data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else { PADDLE_THROW(platform::errors::Unimplemented( "margin_cross_entropy label type noly support int32 and int64, " "but got %s", label_type)); } // scale by s ScaleLogitKernel<T><<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>( logits_ptr, scale, N, D); // step 2, obtain logit_max Tensor logits_max; logits_max = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); T* logits_max_buff = logits_max.mutable_data<T>(place); TensorReduceImpl<T, T, kps::MaxFunctor, kps::IdentityFunctor<T>>( dev_ctx, softmax_2d, &logits_max, kps::IdentityFunctor<T>(), {1}, dev_ctx.stream()); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(logits_max); out_tensor.push_back(logits_max); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::MAX; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(logits_max.dtype())), ncclMax, comm->comm(), stream)); } } #endif // step 3, logit - logit_max LogitsMinusMaxKernel<T><<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>( logits_ptr, logits_max_buff, N, D); // step 4, sum(exp(logit - logit_max)) Tensor sum_exp_logits; sum_exp_logits = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); T* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place); TensorReduceImpl<T, T, kps::AddFunctor, kps::ExpFunctor<T>>( dev_ctx, softmax_2d, &sum_exp_logits, kps::ExpFunctor<T>(), {1}, dev_ctx.stream()); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(sum_exp_logits); out_tensor.push_back(sum_exp_logits); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(sum_exp_logits.dtype())), ncclSum, comm->comm(), stream)); } } #endif // step 5, (logit - logit_max) - log(sum(exp(logit - logit_max))) LogitsMinusLogSumKernel< T><<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>( logits_ptr, sum_exp_logits_buff, N, D); // step 6, prob = exp((logit - logit_max) - log(sum(exp(logit - // logit_max)))) // loss = -((logit_i - logit_max) - log(sum(exp(logit - logit_max)))) phi::funcs::SetConstant<platform::CUDADeviceContext, T>()( dev_ctx, loss, static_cast<T>(0.0)); if (label_type == framework::proto::VarType::INT32) { typedef int32_t LabelT; HardLabelSoftmaxWithCrossEntropyKernel< T, LabelT><<<blocks, threads, 0, 
dev_ctx.stream()>>>( loss_ptr, logits_ptr, labels->data<LabelT>(), rank, N, D, class_interval.data<int>()); } else if (label_type == framework::proto::VarType::INT64) { typedef int64_t LabelT; HardLabelSoftmaxWithCrossEntropyKernel< T, LabelT><<<blocks, threads, 0, dev_ctx.stream()>>>( loss_ptr, logits_ptr, labels->data<LabelT>(), rank, N, D, class_interval.data<int>()); } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(*loss); out_tensor.push_back(*loss); distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( loss_ptr, loss_ptr, loss->numel(), platform::ToNCCLDataType( framework::TransToProtoVarType(loss->dtype())), ncclSum, comm->comm(), stream)); } } #endif } }; template <typename T> class MarginCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* labels = context.Input<Tensor>("Label"); const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* softmax = context.Input<Tensor>("Softmax"); const Tensor* loss_grad = context.Input<Tensor>(framework::GradVarName("Loss")); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const bool return_softmax = context.Attr<bool>("return_softmax"); const int rid = context.Attr<int>("ring_id"); const int nranks = context.Attr<int>("nranks"); const int rank = context.Attr<int>("rank"); const float margin1 = context.Attr<float>("margin1"); const float margin2 = context.Attr<float>("margin2"); const float margin3 = context.Attr<float>("margin3"); const float scale = context.Attr<float>("scale"); auto& dev_ctx = context.template device_context<platform::CUDADeviceContext>(); const auto sofrmax_dims = softmax->dims(); const int axis = sofrmax_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, sofrmax_dims); const int D = phi::funcs::SizeFromAxis(axis, sofrmax_dims); if (return_softmax) { framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), logit_grad); } else { logit_grad->ShareDataWith(*softmax); } int blocks = NumBlocks(N * D); int threads = kNumCUDAThreads; const auto& label_type = framework::TransToProtoVarType(labels->dtype()); Tensor class_interval; GetClassInterval(dev_ctx.stream(), context.GetPlace(), context.cuda_device_context(), rid, rank, nranks, D, &class_interval); if (label_type == framework::proto::VarType::INT32) { typedef int32_t LabelT; CalculateGrad<T, LabelT><<<blocks, threads, 0, dev_ctx.stream()>>>( logit_grad->data<T>(), loss_grad->data<T>(), logits->data<T>(), labels->data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } else if (label_type == framework::proto::VarType::INT64) { typedef int64_t LabelT; CalculateGrad<T, LabelT><<<blocks, threads, 0, dev_ctx.stream()>>>( logit_grad->data<T>(), loss_grad->data<T>(), logits->data<T>(), labels->data<LabelT>(), margin1, margin2, scale, rank, N, D, class_interval.data<int>()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(margin_cross_entropy, ops::MarginCrossEntropyOpCUDAKernel<float>, ops::MarginCrossEntropyOpCUDAKernel<double>, ops::MarginCrossEntropyOpCUDAKernel<plat::float16>); 
REGISTER_OP_CUDA_KERNEL(margin_cross_entropy_grad, ops::MarginCrossEntropyGradCUDAKernel<float>, ops::MarginCrossEntropyGradCUDAKernel<double>, ops::MarginCrossEntropyGradCUDAKernel<plat::float16>);
6621041c270fab98495e3b3ea871fcda7f3289b7.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <sys/time.h>

static const int ThreadsPerBlock = 512;

static __global__ void collatz(const long upper, int* const maxlen)
{
  const long i = threadIdx.x + blockIdx.x * (long)blockDim.x;
  // compute sequence lengths
  if (i < (upper + 1)/2) {
    long val = 2*i + 1; // translate to i-th odd
    int len = 1;
    while (val != 1) {
      len++;
      if ((val % 2) == 0) {
        val = val / 2; // even
      } else {
        val = 3 * val + 1; // odd
      }
    }
    if(len > *maxlen) atomicMax(maxlen, len); // per instructions
  }
}

static void CheckCuda()
{
  hipError_t e;
  hipDeviceSynchronize();
  if (hipSuccess != (e = hipGetLastError())) {
    fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e));
    exit(-1);
  }
}

int main(int argc, char *argv[])
{
  printf("Collatz v1.2\n");

  // check command line
  if (argc != 2) {fprintf(stderr, "USAGE: %s upper_bound\n", argv[0]); exit(-1);}
  const long upper = atol(argv[1]);
  if (upper < 5) {fprintf(stderr, "ERROR: upper_bound must be at least 5\n"); exit(-1);}
  if ((upper % 2) != 1) {fprintf(stderr, "ERROR: upper_bound must be an odd number\n"); exit(-1);}
  printf("upper bound: %ld\n", upper);

  // allocate cpu vars
  int* const maxlen = new int;
  *maxlen = 0;

  // allocate gpu vars
  int* d_maxlen;
  if (hipSuccess != hipMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}

  // initialize gpu vars
  if (hipSuccess != hipMemcpy(d_maxlen, maxlen, sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);}

  // start time
  timeval start, end;
  gettimeofday(&start, NULL);

  // execute timed code
  // because we're only testing odd values, there are (upper+1)/2 number of iterations
  hipLaunchKernelGGL(( collatz), dim3(((upper+1)/2 + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, upper, d_maxlen);
  hipDeviceSynchronize();

  // end time
  gettimeofday(&end, NULL);
  const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
  printf("compute time: %.4f s\n", runtime);

  // get result from GPU
  CheckCuda();
  if (hipSuccess != hipMemcpy(maxlen, d_maxlen, sizeof(int), hipMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying to host failed\n"); exit(-1);}

  // print result
  printf("longest sequence: %d elements\n", *maxlen);

  // clean up
  free(maxlen);
  hipFree(d_maxlen);
  return 0;
}
6621041c270fab98495e3b3ea871fcda7f3289b7.cu
#include <cstdio>
#include <algorithm>
#include <cuda.h>
#include <sys/time.h>

static const int ThreadsPerBlock = 512;

static __global__ void collatz(const long upper, int* const maxlen)
{
  const long i = threadIdx.x + blockIdx.x * (long)blockDim.x;
  // compute sequence lengths
  if (i < (upper + 1)/2) {
    long val = 2*i + 1; // translate to i-th odd
    int len = 1;
    while (val != 1) {
      len++;
      if ((val % 2) == 0) {
        val = val / 2; // even
      } else {
        val = 3 * val + 1; // odd
      }
    }
    if(len > *maxlen) atomicMax(maxlen, len); // per instructions
  }
}

static void CheckCuda()
{
  cudaError_t e;
  cudaDeviceSynchronize();
  if (cudaSuccess != (e = cudaGetLastError())) {
    fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e));
    exit(-1);
  }
}

int main(int argc, char *argv[])
{
  printf("Collatz v1.2\n");

  // check command line
  if (argc != 2) {fprintf(stderr, "USAGE: %s upper_bound\n", argv[0]); exit(-1);}
  const long upper = atol(argv[1]);
  if (upper < 5) {fprintf(stderr, "ERROR: upper_bound must be at least 5\n"); exit(-1);}
  if ((upper % 2) != 1) {fprintf(stderr, "ERROR: upper_bound must be an odd number\n"); exit(-1);}
  printf("upper bound: %ld\n", upper);

  // allocate cpu vars
  int* const maxlen = new int;
  *maxlen = 0;

  // allocate gpu vars
  int* d_maxlen;
  if (cudaSuccess != cudaMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}

  // initialize gpu vars
  if (cudaSuccess != cudaMemcpy(d_maxlen, maxlen, sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);}

  // start time
  timeval start, end;
  gettimeofday(&start, NULL);

  // execute timed code
  // because we're only testing odd values, there are (upper+1)/2 number of iterations
  collatz<<<((upper+1)/2 + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(upper, d_maxlen);
  cudaDeviceSynchronize();

  // end time
  gettimeofday(&end, NULL);
  const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
  printf("compute time: %.4f s\n", runtime);

  // get result from GPU
  CheckCuda();
  if (cudaSuccess != cudaMemcpy(maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying to host failed\n"); exit(-1);}

  // print result
  printf("longest sequence: %d elements\n", *maxlen);

  // clean up
  free(maxlen);
  cudaFree(d_maxlen);
  return 0;
}
599de649287d85e6f408530a6d3bda66551809ca.hip
// !!! This is a file automatically generated by hipify!!! #include "rnn/attention.h" #include "graph/node_operators_binary.h" #include "kernels/tensor_operators.h" namespace marian { namespace rnn { struct AttentionNodeOp : public NaryNodeOp { AttentionNodeOp(const std::vector<Expr>& nodes) : NaryNodeOp(nodes, keywords::shape = newShape(nodes)) {} Shape newShape(const std::vector<Expr>& nodes) { Shape shape = nodes[1]->shape(); Shape vaShape = nodes[0]->shape(); Shape ctxShape = nodes[1]->shape(); Shape stateShape = nodes[2]->shape(); for(int i = 0; i < stateShape.size(); ++i) { UTIL_THROW_IF2(ctxShape[i] != stateShape[i] && ctxShape[i] != 1 && stateShape[i] != 1, "Shapes cannot be broadcasted"); shape.set(i, ::max(ctxShape[i], stateShape[i])); } UTIL_THROW_IF2(vaShape[0] != shape[1] || vaShape[1] != 1, "Wrong size"); shape.set(1, 1); return shape; } NodeOps forwardOps() { return {NodeOp(Att(val_, child(0)->val(), child(1)->val(), child(2)->val(), children_.size() == 4 ? child(3)->val() : nullptr))}; } NodeOps backwardOps() { return { NodeOp(AttBack(child(0)->grad(), child(1)->grad(), child(2)->grad(), children_.size() == 4 ? child(3)->grad() : nullptr, child(0)->val(), child(1)->val(), child(2)->val(), children_.size() == 4 ? child(3)->val() : nullptr, adj_);) }; } // do not check if node is trainable virtual void runBackward(const NodeOps& ops) { for(auto&& op : ops) op(); } const std::string type() { return "Att-ops"; } const std::string color() { return "yellow"; } }; Expr attOps(Expr va, Expr context, Expr state, Expr coverage) { std::vector<Expr> nodes{va, context, state}; if(coverage) nodes.push_back(coverage); int dimBatch = context->shape()[0]; int dimWords = context->shape()[2]; int dimBeam = state->shape()[3]; return reshape(Expression<AttentionNodeOp>(nodes), {dimWords, dimBatch, 1, dimBeam}); } } }
599de649287d85e6f408530a6d3bda66551809ca.cu
#include "rnn/attention.h" #include "graph/node_operators_binary.h" #include "kernels/tensor_operators.h" namespace marian { namespace rnn { struct AttentionNodeOp : public NaryNodeOp { AttentionNodeOp(const std::vector<Expr>& nodes) : NaryNodeOp(nodes, keywords::shape = newShape(nodes)) {} Shape newShape(const std::vector<Expr>& nodes) { Shape shape = nodes[1]->shape(); Shape vaShape = nodes[0]->shape(); Shape ctxShape = nodes[1]->shape(); Shape stateShape = nodes[2]->shape(); for(int i = 0; i < stateShape.size(); ++i) { UTIL_THROW_IF2(ctxShape[i] != stateShape[i] && ctxShape[i] != 1 && stateShape[i] != 1, "Shapes cannot be broadcasted"); shape.set(i, std::max(ctxShape[i], stateShape[i])); } UTIL_THROW_IF2(vaShape[0] != shape[1] || vaShape[1] != 1, "Wrong size"); shape.set(1, 1); return shape; } NodeOps forwardOps() { return {NodeOp(Att(val_, child(0)->val(), child(1)->val(), child(2)->val(), children_.size() == 4 ? child(3)->val() : nullptr))}; } NodeOps backwardOps() { return { NodeOp(AttBack(child(0)->grad(), child(1)->grad(), child(2)->grad(), children_.size() == 4 ? child(3)->grad() : nullptr, child(0)->val(), child(1)->val(), child(2)->val(), children_.size() == 4 ? child(3)->val() : nullptr, adj_);) }; } // do not check if node is trainable virtual void runBackward(const NodeOps& ops) { for(auto&& op : ops) op(); } const std::string type() { return "Att-ops"; } const std::string color() { return "yellow"; } }; Expr attOps(Expr va, Expr context, Expr state, Expr coverage) { std::vector<Expr> nodes{va, context, state}; if(coverage) nodes.push_back(coverage); int dimBatch = context->shape()[0]; int dimWords = context->shape()[2]; int dimBeam = state->shape()[3]; return reshape(Expression<AttentionNodeOp>(nodes), {dimWords, dimBatch, 1, dimBeam}); } } }
4c77fceac13fcfad31cfcfae7574ac62ef8caed9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#include "hipcub/hipcub.hpp"
#include "paddle/fluid/operators/cum_op.h"
#include "paddle/fluid/platform/gpu_launch_config.h"

using Tensor = paddle::framework::Tensor;
using LoDTensor = paddle::framework::LoDTensor;

namespace paddle {
namespace operators {

template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(const T* idata, T* odata, int src_base,
                             int dst_base, int valid_item) {
  __shared__ T sh_mem[BLOCK_SIZE];
  int tx = threadIdx.x;

  int offset = tx;
  int in_index = src_base + offset;
  if (offset >= valid_item) {
    sh_mem[offset] = 0;
  } else {
    int sh_mem_index = BLOCK_SIZE - offset - 1;
    T data = idata[in_index];
    sh_mem[sh_mem_index] = data;
  }

  __syncthreads();
  int out_index = dst_base - offset;
  if (offset < valid_item) {
    int sh_mem_index = BLOCK_SIZE - offset - 1;
    odata[out_index] = sh_mem[sh_mem_index];
  }
}

template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data, T* reverse_data,
                                 int reverse_size, int outer_size,
                                 int inner_size) {
  int bx = blockIdx.x;
  int by = blockIdx.y;
  int item_per_block = 1024;

  for (int block_offset = 0; block_offset < reverse_size;
       block_offset += item_per_block) {
    int valid_item = (reverse_size - block_offset > item_per_block)
                         ? item_per_block
                         : reverse_size - block_offset;
    int src_offset =
        bx * reverse_size + block_offset + by * (inner_size * reverse_size);
    int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
                     reverse_size - 1 - block_offset;
    if (reverse_size < item_per_block) {
      valid_item = reverse_size;
    }

    BlockReverse<T, 1024>(matrix_data, reverse_data, src_offset, dst_offset,
                          valid_item);
  }
}

template <typename T>
struct BlockPrefixCallbackOp {
  // Running prefix
  T running_total;
  // Constructor
  __device__ BlockPrefixCallbackOp(T running_total)
      : running_total(running_total) {}
  // Callback operator to be entered by the first warp of threads in the block.
  // Thread-0 is responsible for returning a value for seeding the block-wide
  // scan.
  __device__ T operator()(T block_aggregate) {
    T old_prefix = running_total;
    running_total = old_prefix + block_aggregate;
    return old_prefix;
  }
};

// No bank-conflict transpose
// Same as transposeCoalesced except the first tile dimension is padded
// to avoid shared memory bank conflicts.
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata, const T* idata, size_t height,
                                size_t width) {
  __shared__ T tile[TILE_DIM][TILE_DIM + 1];
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if (x < width && (y + j) < height) {
      tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
    } else {
      tile[threadIdx.y + j][threadIdx.x] = 0;
    }
  }

  __syncthreads();

  x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
  y = blockIdx.x * TILE_DIM + threadIdx.y;

  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if (x < height && (y + j) < width) {
      odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
    }
  }
}

template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockScanKernel(T* d_out, const T* d_in, int inner_size,
                                int outer_size, int scan_size, bool exclusive) {
  // Specialize BlockLoad, BlockStore, and BlockRadixSort collective types
  // (use the hipcub namespace consistently so the hipified file builds on ROCm)
  typedef hipcub::BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD,
                            hipcub::BLOCK_LOAD_TRANSPOSE>
      BlockLoadT;
  typedef hipcub::BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD,
                             hipcub::BLOCK_STORE_TRANSPOSE>
      BlockStoreT;
  typedef hipcub::BlockScan<T, BLOCK_THREADS> BlockScanT;
  // Allocate type-safe, repurposable shared memory for collectives
  __shared__ union {
    typename BlockLoadT::TempStorage load;
    typename BlockStoreT::TempStorage store;
    typename BlockScanT::TempStorage scan;
  } temp_storage;

  int bx = blockIdx.x;
  int by = blockIdx.y;

  BlockPrefixCallbackOp<T> prefix_op(0);
  T block_aggregate = static_cast<T>(0);

  // Obtain this block's segment of consecutive keys (blocked across threads)
  int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
  for (int block_offset = 0; block_offset < scan_size;
       block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
    int valid_item = (scan_size - block_offset > item_per_block)
                         ? item_per_block
                         : (scan_size - block_offset);
    if (scan_size < item_per_block) {
      valid_item = scan_size;
    }

    int offset = bx * scan_size + block_offset + by * (inner_size * scan_size);

    T thread_keys[ITEMS_PER_THREAD];
    BlockLoadT(temp_storage.load)
        .Load(d_in + offset, thread_keys, valid_item, 0);

    __syncthreads();
    if (exclusive) {
      T init_value = static_cast<T>(0);
      BlockScanT(temp_storage.scan)
          .ExclusiveScan(thread_keys, thread_keys, hipcub::Sum(), prefix_op);
    } else {
      BlockScanT(temp_storage.scan)
          .InclusiveScan(thread_keys, thread_keys, hipcub::Sum(), prefix_op);
    }
    __syncthreads();

    BlockStoreT(temp_storage.store)
        .Store(d_out + offset, thread_keys, valid_item);
  }
}

template <typename DeviceContext, typename T>
class CumCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<framework::Tensor>("X");
    auto* out = context.Output<framework::Tensor>("Out");

    int axis = context.Attr<int>("axis");
    bool exclusive = context.Attr<bool>("exclusive");
    bool reverse = context.Attr<bool>("reverse");
    auto out_dims = out->dims();
    auto size = in->numel();

    PADDLE_ENFORCE_EQ(
        axis < out_dims.size() && axis >= (0 - out_dims.size()), true,
        platform::errors::OutOfRange(
            "Attr(axis) is out of range, It's expected "
            "to be in range of [-%d, %d]. But received Attr(axis) = %d.",
            out_dims.size(), out_dims.size() - 1, axis));
    if (axis < 0) {
      axis += out_dims.size();
    }

    T* out_data = out->mutable_data<T>(context.GetPlace());
    const T* in_data = in->data<T>();

    // Use thrust for parallel acceleration when the input size is equal to the
    // length of the axis dimension.
    if (size == out_dims[axis]) {
      if (reverse) {
        thrust::device_ptr<const T> dev_ptr =
            thrust::device_pointer_cast(in_data);
        thrust::device_vector<T> vec(dev_ptr, dev_ptr + size);
        if (exclusive) {
          thrust::exclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
                                 out_data);
        } else {
          thrust::inclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
                                 out_data);
        }
        thrust::reverse(thrust::device, out_data, out_data + size);
      } else {
        if (exclusive) {
          thrust::exclusive_scan(thrust::device, in_data, in_data + size,
                                 out_data);
        } else {
          thrust::inclusive_scan(thrust::device, in_data, in_data + size,
                                 out_data);
        }
      }
      return;
    }

    size_t height = 1;
    size_t width = 1;
    for (size_t i = 0; i <= axis; i++) {
      height *= out_dims[i];
    }

    for (size_t i = axis + 1; i < out_dims.size(); i++) {
      width *= out_dims[i];
    }
    int scan_size = out_dims[axis];
    bool transpose = (axis != out_dims.size() - 1);

    int tile_size = 32;
    dim3 blocks(32, 8);
    dim3 transpose_grids((width + tile_size - 1) / tile_size,
                         (height + tile_size - 1) / tile_size);
    auto& dev_ctx = context.template device_context<DeviceContext>();
    Tensor tmp;
    tmp.Resize(out_dims);
    auto* tmp_data = tmp.mutable_data<T>(context.GetPlace());
    T* next_in_data = out_data;
    T* next_out_data = tmp_data;
    if (transpose) {
      hipLaunchKernelGGL(( MatrixTranspose<T, 32, 8>), dim3(transpose_grids),
                         dim3(blocks), 0, dev_ctx.stream(), out_data, in_data,
                         height, width);
      next_in_data = out_data;
      next_out_data = tmp_data;
    }
    auto swap_ptr = [](T*& ptr1, T*& ptr2) {
      T* tmp = ptr2;
      ptr2 = ptr1;
      ptr1 = tmp;
    };
    int outer_size = height / scan_size;
    int inner_size = width;
    // Consider the size of shared memory, here block size is 128
    dim3 scan_grid(outer_size, inner_size);
    dim3 reverse_grid = scan_grid;
    if (reverse) {
      if (transpose) {
        reverse_grid.x = scan_grid.y;
        reverse_grid.y = scan_grid.x;
        hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid),
                           dim3(1024), 0, dev_ctx.stream(), next_in_data,
                           next_out_data, scan_size, outer_size, inner_size);
        if (!transpose) next_in_data = tmp_data;
        swap_ptr(next_in_data, next_out_data);
      } else {
        hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid),
                           dim3(1024), 0, dev_ctx.stream(), in_data, out_data,
                           scan_size, outer_size, inner_size);
      }
    }
    if (!transpose && !reverse) {
      hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4>), dim3(scan_grid),
                         dim3(128), 0, dev_ctx.stream(), out_data, in_data,
                         outer_size, inner_size, scan_size, exclusive);

    } else {
      hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4>), dim3(scan_grid),
                         dim3(128), 0, dev_ctx.stream(), next_out_data,
                         next_in_data, outer_size, inner_size, scan_size,
                         exclusive);
    }
    swap_ptr(next_in_data, next_out_data);
    if (reverse) {
      hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid),
                         dim3(1024), 0, dev_ctx.stream(), next_in_data,
                         next_out_data, scan_size, outer_size, inner_size);
      swap_ptr(next_in_data, next_out_data);
    }
    if (transpose) {
      transpose_grids.x = (height + tile_size - 1) / tile_size;
      transpose_grids.y = (width + tile_size - 1) / tile_size;
      hipLaunchKernelGGL(( MatrixTranspose<T, 32, 8>), dim3(transpose_grids),
                         dim3(blocks), 0, dev_ctx.stream(), next_out_data,
                         next_in_data, width, height);
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    cumsum, ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, float>,
    ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, double>,
    ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int>,
    ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
4c77fceac13fcfad31cfcfae7574ac62ef8caed9.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#include "cub/cub.cuh"
#include "paddle/fluid/operators/cum_op.h"
#include "paddle/fluid/platform/gpu_launch_config.h"

using Tensor = paddle::framework::Tensor;
using LoDTensor = paddle::framework::LoDTensor;

namespace paddle {
namespace operators {

template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(const T* idata, T* odata, int src_base,
                             int dst_base, int valid_item) {
  __shared__ T sh_mem[BLOCK_SIZE];
  int tx = threadIdx.x;

  int offset = tx;
  int in_index = src_base + offset;
  if (offset >= valid_item) {
    sh_mem[offset] = 0;
  } else {
    int sh_mem_index = BLOCK_SIZE - offset - 1;
    T data = idata[in_index];
    sh_mem[sh_mem_index] = data;
  }

  __syncthreads();
  int out_index = dst_base - offset;
  if (offset < valid_item) {
    int sh_mem_index = BLOCK_SIZE - offset - 1;
    odata[out_index] = sh_mem[sh_mem_index];
  }
}

template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data, T* reverse_data,
                                 int reverse_size, int outer_size,
                                 int inner_size) {
  int bx = blockIdx.x;
  int by = blockIdx.y;
  int item_per_block = 1024;

  for (int block_offset = 0; block_offset < reverse_size;
       block_offset += item_per_block) {
    int valid_item = (reverse_size - block_offset > item_per_block)
                         ? item_per_block
                         : reverse_size - block_offset;
    int src_offset =
        bx * reverse_size + block_offset + by * (inner_size * reverse_size);
    int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
                     reverse_size - 1 - block_offset;
    if (reverse_size < item_per_block) {
      valid_item = reverse_size;
    }

    BlockReverse<T, 1024>(matrix_data, reverse_data, src_offset, dst_offset,
                          valid_item);
  }
}

template <typename T>
struct BlockPrefixCallbackOp {
  // Running prefix
  T running_total;
  // Constructor
  __device__ BlockPrefixCallbackOp(T running_total)
      : running_total(running_total) {}
  // Callback operator to be entered by the first warp of threads in the block.
  // Thread-0 is responsible for returning a value for seeding the block-wide
  // scan.
  __device__ T operator()(T block_aggregate) {
    T old_prefix = running_total;
    running_total = old_prefix + block_aggregate;
    return old_prefix;
  }
};

// No bank-conflict transpose
// Same as transposeCoalesced except the first tile dimension is padded
// to avoid shared memory bank conflicts.
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata, const T* idata, size_t height,
                                size_t width) {
  __shared__ T tile[TILE_DIM][TILE_DIM + 1];
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if (x < width && (y + j) < height) {
      tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
    } else {
      tile[threadIdx.y + j][threadIdx.x] = 0;
    }
  }

  __syncthreads();

  x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
  y = blockIdx.x * TILE_DIM + threadIdx.y;

  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if (x < height && (y + j) < width) {
      odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
    }
  }
}

template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockScanKernel(T* d_out, const T* d_in, int inner_size,
                                int outer_size, int scan_size, bool exclusive) {
  // Specialize BlockLoad, BlockStore, and BlockRadixSort collective types
  typedef cub::BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD,
                         cub::BLOCK_LOAD_TRANSPOSE>
      BlockLoadT;
  typedef cub::BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD,
                          cub::BLOCK_STORE_TRANSPOSE>
      BlockStoreT;
  typedef cub::BlockScan<T, BLOCK_THREADS> BlockScanT;
  // Allocate type-safe, repurposable shared memory for collectives
  __shared__ union {
    typename BlockLoadT::TempStorage load;
    typename BlockStoreT::TempStorage store;
    typename BlockScanT::TempStorage scan;
  } temp_storage;

  int bx = blockIdx.x;
  int by = blockIdx.y;

  BlockPrefixCallbackOp<T> prefix_op(0);
  T block_aggregate = static_cast<T>(0);

  // Obtain this block's segment of consecutive keys (blocked across threads)
  int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
  for (int block_offset = 0; block_offset < scan_size;
       block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
    int valid_item = (scan_size - block_offset > item_per_block)
                         ? item_per_block
                         : (scan_size - block_offset);
    if (scan_size < item_per_block) {
      valid_item = scan_size;
    }

    int offset = bx * scan_size + block_offset + by * (inner_size * scan_size);

    T thread_keys[ITEMS_PER_THREAD];
    BlockLoadT(temp_storage.load)
        .Load(d_in + offset, thread_keys, valid_item, 0);

    __syncthreads();
    if (exclusive) {
      T init_value = static_cast<T>(0);
      BlockScanT(temp_storage.scan)
          .ExclusiveScan(thread_keys, thread_keys, cub::Sum(), prefix_op);
    } else {
      BlockScanT(temp_storage.scan)
          .InclusiveScan(thread_keys, thread_keys, cub::Sum(), prefix_op);
    }
    __syncthreads();

    BlockStoreT(temp_storage.store)
        .Store(d_out + offset, thread_keys, valid_item);
  }
}

template <typename DeviceContext, typename T>
class CumCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<framework::Tensor>("X");
    auto* out = context.Output<framework::Tensor>("Out");

    int axis = context.Attr<int>("axis");
    bool exclusive = context.Attr<bool>("exclusive");
    bool reverse = context.Attr<bool>("reverse");
    auto out_dims = out->dims();
    auto size = in->numel();

    PADDLE_ENFORCE_EQ(
        axis < out_dims.size() && axis >= (0 - out_dims.size()), true,
        platform::errors::OutOfRange(
            "Attr(axis) is out of range, It's expected "
            "to be in range of [-%d, %d]. But received Attr(axis) = %d.",
            out_dims.size(), out_dims.size() - 1, axis));
    if (axis < 0) {
      axis += out_dims.size();
    }

    T* out_data = out->mutable_data<T>(context.GetPlace());
    const T* in_data = in->data<T>();

    // Use thrust for parallel acceleration when the input size is equal to the
    // length of the ‘axis’ dimension.
    if (size == out_dims[axis]) {
      if (reverse) {
        thrust::device_ptr<const T> dev_ptr =
            thrust::device_pointer_cast(in_data);
        thrust::device_vector<T> vec(dev_ptr, dev_ptr + size);
        if (exclusive) {
          thrust::exclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
                                 out_data);
        } else {
          thrust::inclusive_scan(thrust::device, vec.rbegin(), vec.rend(),
                                 out_data);
        }
        thrust::reverse(thrust::device, out_data, out_data + size);
      } else {
        if (exclusive) {
          thrust::exclusive_scan(thrust::device, in_data, in_data + size,
                                 out_data);
        } else {
          thrust::inclusive_scan(thrust::device, in_data, in_data + size,
                                 out_data);
        }
      }
      return;
    }

    size_t height = 1;
    size_t width = 1;
    for (size_t i = 0; i <= axis; i++) {
      height *= out_dims[i];
    }

    for (size_t i = axis + 1; i < out_dims.size(); i++) {
      width *= out_dims[i];
    }
    int scan_size = out_dims[axis];
    bool transpose = (axis != out_dims.size() - 1);

    int tile_size = 32;
    dim3 blocks(32, 8);
    dim3 transpose_grids((width + tile_size - 1) / tile_size,
                         (height + tile_size - 1) / tile_size);
    auto& dev_ctx = context.template device_context<DeviceContext>();
    Tensor tmp;
    tmp.Resize(out_dims);
    auto* tmp_data = tmp.mutable_data<T>(context.GetPlace());
    T* next_in_data = out_data;
    T* next_out_data = tmp_data;
    if (transpose) {
      MatrixTranspose<T, 32, 8><<<transpose_grids, blocks, 0,
                                  dev_ctx.stream()>>>(out_data, in_data,
                                                      height, width);
      next_in_data = out_data;
      next_out_data = tmp_data;
    }
    auto swap_ptr = [](T*& ptr1, T*& ptr2) {
      T* tmp = ptr2;
      ptr2 = ptr1;
      ptr1 = tmp;
    };
    int outer_size = height / scan_size;
    int inner_size = width;
    // Consider the size of shared memory, here block size is 128
    dim3 scan_grid(outer_size, inner_size);
    dim3 reverse_grid = scan_grid;
    if (reverse) {
      if (transpose) {
        reverse_grid.x = scan_grid.y;
        reverse_grid.y = scan_grid.x;
        MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
            next_in_data, next_out_data, scan_size, outer_size, inner_size);
        if (!transpose) next_in_data = tmp_data;
        swap_ptr(next_in_data, next_out_data);
      } else {
        MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
            in_data, out_data, scan_size, outer_size, inner_size);
      }
    }
    if (!transpose && !reverse) {
      BlockScanKernel<T, 128, 4><<<scan_grid, 128, 0, dev_ctx.stream()>>>(
          out_data, in_data, outer_size, inner_size, scan_size, exclusive);

    } else {
      BlockScanKernel<T, 128, 4><<<scan_grid, 128, 0, dev_ctx.stream()>>>(
          next_out_data, next_in_data, outer_size, inner_size, scan_size,
          exclusive);
    }
    swap_ptr(next_in_data, next_out_data);
    if (reverse) {
      MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
          next_in_data, next_out_data, scan_size, outer_size, inner_size);
      swap_ptr(next_in_data, next_out_data);
    }
    if (transpose) {
      transpose_grids.x = (height + tile_size - 1) / tile_size;
      transpose_grids.y = (width + tile_size - 1) / tile_size;
      MatrixTranspose<T, 32, 8><<<transpose_grids, blocks, 0,
                                  dev_ctx.stream()>>>(next_out_data,
                                                      next_in_data, width,
                                                      height);
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    cumsum, ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, float>,
    ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, double>,
    ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int>,
    ops::CumCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);